blob: 85812489000b9d5208bc3d0d402922d73d15d974 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halla478b852017-12-04 15:00:15 -080023import pexpect
24import re
Jon Halle1a3b752015-07-22 13:02:46 -070025
Jon Hallf37d44d2017-05-24 10:37:30 -070026
Jon Hall41d39f12016-04-11 22:54:35 -070027class HA():
Jon Hall57b50432015-10-22 10:20:10 -070028
    def __init__( self ):
        """
        Shared helper class for the ONOS HA test suites.

        NOTE: all methods rely on TestON's implicit globals ( main,
        utilities ) being available at call time.
        """
        self.default = ''  # placeholder default value; not read in this file
        # NOTE(review): topoMappings lives on the global `main`, not on this
        # instance; assignDevices() repopulates it with mac -> dpid mappings.
        main.topoMappings = {}
Jon Hall57b50432015-10-22 10:20:10 -070032
    def customizeOnosGenPartitions( self ):
        """
        Install the HA test's custom onos-gen-partitions script over the
        stock one in the ONOS tools directory on the bench node.
        """
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testsRoot + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        # NOTE(review): direction="from" while copying srcFile -> dstDir looks
        # inverted; presumably matches secureCopy's semantics — confirm.
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070044
Devin Lim58046fa2017-07-05 16:55:00 -070045 def cleanUpGenPartition( self ):
46 # clean up gen-partitions file
47 try:
48 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
49 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
50 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
51 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
Jon Hall0e240372018-05-02 11:21:57 -070052 main.log.info( "Cleaning custom gen partitions file, response was: \n" +
Devin Lim58046fa2017-07-05 16:55:00 -070053 str( main.ONOSbench.handle.before ) )
54 except ( pexpect.TIMEOUT, pexpect.EOF ):
55 main.log.exception( "ONOSbench: pexpect exception found:" +
56 main.ONOSbench.handle.before )
Devin Lim44075962017-08-11 10:56:37 -070057 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070058
Devin Lim58046fa2017-07-05 16:55:00 -070059 def startingMininet( self ):
60 main.step( "Starting Mininet" )
61 # scp topo file to mininet
62 # TODO: move to params?
63 topoName = "obelisk.py"
64 filePath = main.ONOSbench.home + "/tools/test/topos/"
65 main.ONOSbench.scp( main.Mininet1,
66 filePath + topoName,
67 main.Mininet1.home,
68 direction="to" )
69 mnResult = main.Mininet1.startNet()
70 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
71 onpass="Mininet Started",
72 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070073
Devin Lim58046fa2017-07-05 16:55:00 -070074 def scalingMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070075 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070076 main.scaling = main.params[ 'scaling' ].split( "," )
77 main.log.debug( main.scaling )
78 scale = main.scaling.pop( 0 )
79 main.log.debug( scale )
Jon Hallab611372018-02-21 15:26:05 -080080 if "b" in scale:
Devin Lim58046fa2017-07-05 16:55:00 -070081 equal = True
82 else:
83 equal = False
84 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070085 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
86 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070087 utilities.assert_equals( expect=main.TRUE, actual=genResult,
88 onpass="New cluster metadata file generated",
89 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070090
Devin Lim58046fa2017-07-05 16:55:00 -070091 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070092 main.step( "Generate initial metadata file" )
93 if main.Cluster.numCtrls >= 5:
94 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070095 else:
96 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070097 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070098 utilities.assert_equals( expect=main.TRUE, actual=genResult,
99 onpass="New cluster metadata file generated",
100 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700101
Devin Lim142b5342017-07-20 15:22:39 -0700102 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700103 import os
104 main.step( "Setup server for cluster metadata file" )
105 main.serverPort = main.params[ 'server' ][ 'port' ]
106 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
107 main.log.debug( "Root dir: {}".format( rootDir ) )
108 status = main.Server.start( main.ONOSbench,
109 rootDir,
110 port=main.serverPort,
111 logDir=main.logdir + "/server.log" )
112 utilities.assert_equals( expect=main.TRUE, actual=status,
113 onpass="Server started",
114 onfail="Failled to start SimpleHTTPServer" )
115
Jon Hall4f360bc2017-09-07 10:19:52 -0700116 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700117 main.step( "Copying backup config files" )
118 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
119 cp = main.ONOSbench.scp( main.ONOSbench,
120 main.onosServicepath,
121 main.onosServicepath + ".backup",
122 direction="to" )
123
124 utilities.assert_equals( expect=main.TRUE,
125 actual=cp,
126 onpass="Copy backup config file succeeded",
127 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700128
129 def setMetadataUrl( self ):
130 # NOTE: You should probably backup the config before and reset the config after the test
Devin Lim58046fa2017-07-05 16:55:00 -0700131 # we need to modify the onos-service file to use remote metadata file
132 # url for cluster metadata file
133 iface = main.params[ 'server' ].get( 'interface' )
134 ip = main.ONOSbench.getIpAddr( iface=iface )
135 metaFile = "cluster.json"
136 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
Devin Lim58046fa2017-07-05 16:55:00 -0700137 main.log.warn( repr( javaArgs ) )
138 handle = main.ONOSbench.handle
Jon Hall4173b242017-09-12 17:04:38 -0700139 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
140 main.onosServicepath )
Devin Lim58046fa2017-07-05 16:55:00 -0700141 main.log.warn( repr( sed ) )
142 handle.sendline( sed )
143 handle.expect( metaFile )
144 output = handle.before
145 handle.expect( "\$" )
146 output += handle.before
147 main.log.debug( repr( output ) )
148
149 def cleanUpOnosService( self ):
150 # Cleanup custom onos-service file
151 main.ONOSbench.scp( main.ONOSbench,
152 main.onosServicepath + ".backup",
153 main.onosServicepath,
154 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700155
    def consistentCheck( self ):
        """
        Checks that TestON counters are consistent across all nodes.

        Returns the tuple ( onosCounters, consistent )
        - onosCounters is the parsed json output of the counters command on
          all nodes
        - consistent is main.TRUE if all "TestON" counters are consitent across
          all nodes or main.FALSE
        """
        try:
            # Get onos counters results: query every active node in parallel,
            # retrying each up to 5 times
            onosCountersRaw = []
            threads = []
            for ctrl in main.Cluster.active():
                t = main.Thread( target=utilities.retry,
                                 name="counters-" + str( ctrl ),
                                 args=[ ctrl.counters, [ None ] ],
                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                          'randomTime': True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                onosCountersRaw.append( t.result )
            # Parse each raw response; an unparseable response becomes {} so
            # indices keep lining up with the active-node list
            onosCounters = []
            for i in range( len( onosCountersRaw ) ):
                try:
                    onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
                except ( ValueError, TypeError ):
                    main.log.error( "Could not parse counters response from " +
                                    str( main.Cluster.active( i ) ) )
                    main.log.warn( repr( onosCountersRaw[ i ] ) )
                    onosCounters.append( {} )

            testCounters = {}
            # make a list of all the "TestON-*" counters in ONOS
            # lookes like a dict whose keys are the name of the ONOS node and
            # values are a list of the counters. I.E.
            # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
            # }
            # NOTE: There is an assumtion that all nodes are active
            #       based on the above for loops
            # NOTE(review): iteritems()/.values()[0] below are Python 2 only
            for controller in enumerate( onosCounters ):
                for key, value in controller[ 1 ].iteritems():
                    if 'TestON' in key:
                        node = main.Cluster.active( controller[ 0 ] )
                        try:
                            testCounters[ node ].append( { key: value } )
                        except KeyError:
                            # first counter seen for this node
                            testCounters[ node ] = [ { key: value } ]
            # compare the counters on each node: consistent when every node's
            # counter list equals the first node's list
            firstV = testCounters.values()[ 0 ]
            tmp = [ v == firstV for k, v in testCounters.iteritems() ]
            if all( tmp ):
                consistent = main.TRUE
            else:
                consistent = main.FALSE
                main.log.error( "ONOS nodes have different values for counters: %s",
                                testCounters )
            return ( onosCounters, consistent )
        except Exception:
            main.log.exception( "" )
            main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700220
221 def counterCheck( self, counterName, counterValue ):
222 """
223 Checks that TestON counters are consistent across all nodes and that
224 specified counter is in ONOS with the given value
225 """
226 try:
227 correctResults = main.TRUE
228 # Get onos counters results and consistentCheck
229 onosCounters, consistent = self.consistentCheck()
230 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700231 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -0700232 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700233 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700234 onosValue = None
235 try:
236 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700237 except AttributeError:
Jon Hallca319892017-06-15 15:25:22 -0700238 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700239 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700240 correctResults = main.FALSE
241 if onosValue == counterValue:
Jon Hall0e240372018-05-02 11:21:57 -0700242 main.log.info( "{}: {} counter value is correct".format( node, counterName ) )
Jon Halla440e872016-03-31 15:15:50 -0700243 else:
Jon Hall0e240372018-05-02 11:21:57 -0700244 main.log.error( node + ": " + counterName +
Jon Hall41d39f12016-04-11 22:54:35 -0700245 " counter value is incorrect," +
246 " expected value: " + str( counterValue ) +
247 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700248 correctResults = main.FALSE
249 return consistent and correctResults
250 except Exception:
251 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700252 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700253
254 def consistentLeaderboards( self, nodes ):
255 TOPIC = 'org.onosproject.election'
256 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700257 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700258 for n in range( 5 ): # Retry in case election is still happening
259 leaderList = []
260 # Get all leaderboards
261 for cli in nodes:
262 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
263 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700264 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700265 leaderList is not None
Jon Hall41d39f12016-04-11 22:54:35 -0700266 if result:
267 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700268 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700269 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
270 return ( result, leaderList )
271
    def initialSetUp( self, serviceClean=False ):
        """
        Rest of the initial test setup: optional tcpdump capture, optional
        onos-service cleanup, node health check, app activation, and ONOS
        configuration from the params file.

        serviceClean - when True, `git checkout` the onos.conf/onos.service
                       init files to undo earlier modifications
        """
        # Optionally start a packet capture on the Mininet-side interface
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        # Retry the cluster-wide node health check; test aborts on failure
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       attempts=9 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # dump the non-ACTIVE components of each node before aborting
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params ( comma-separated app names )
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            # verify each app reached the ACTIVE state
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            # apply every component/setting/value triple from the params
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700357
Jon Hallca319892017-06-15 15:25:22 -0700358 def commonChecks( self ):
359 # TODO: make this assertable or assert in here?
360 self.topicsCheck()
361 self.partitionsCheck()
362 self.pendingMapCheck()
363 self.appCheck()
364
365 def topicsCheck( self, extraTopics=[] ):
366 """
367 Check for work partition topics in leaders output
368 """
369 leaders = main.Cluster.next().leaders()
370 missing = False
371 try:
372 if leaders:
373 parsedLeaders = json.loads( leaders )
374 output = json.dumps( parsedLeaders,
375 sort_keys=True,
376 indent=4,
377 separators=( ',', ': ' ) )
Jon Hallca319892017-06-15 15:25:22 -0700378 # check for all intent partitions
379 topics = []
380 for i in range( 14 ):
381 topics.append( "work-partition-" + str( i ) )
382 topics += extraTopics
Jon Hallca319892017-06-15 15:25:22 -0700383 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
384 for topic in topics:
385 if topic not in ONOStopics:
386 main.log.error( "Error: " + topic +
387 " not in leaders" )
388 missing = True
389 else:
390 main.log.error( "leaders() returned None" )
391 except ( ValueError, TypeError ):
392 main.log.exception( "Error parsing leaders" )
393 main.log.error( repr( leaders ) )
394 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700395 # NOTE Can we refactor this into the Cluster class?
396 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700397 for ctrl in main.Cluster.active():
398 response = ctrl.CLI.leaders( jsonFormat=False )
399 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
400 str( response ) )
401 return missing
402
403 def partitionsCheck( self ):
404 # TODO: return something assertable
405 partitions = main.Cluster.next().partitions()
406 try:
407 if partitions:
408 parsedPartitions = json.loads( partitions )
409 output = json.dumps( parsedPartitions,
410 sort_keys=True,
411 indent=4,
412 separators=( ',', ': ' ) )
413 main.log.debug( "Partitions: " + output )
414 # TODO check for a leader in all paritions
415 # TODO check for consistency among nodes
416 else:
417 main.log.error( "partitions() returned None" )
418 except ( ValueError, TypeError ):
419 main.log.exception( "Error parsing partitions" )
420 main.log.error( repr( partitions ) )
421
422 def pendingMapCheck( self ):
423 pendingMap = main.Cluster.next().pendingMap()
424 try:
425 if pendingMap:
426 parsedPending = json.loads( pendingMap )
427 output = json.dumps( parsedPending,
428 sort_keys=True,
429 indent=4,
430 separators=( ',', ': ' ) )
431 main.log.debug( "Pending map: " + output )
432 # TODO check something here?
433 else:
434 main.log.error( "pendingMap() returned None" )
435 except ( ValueError, TypeError ):
436 main.log.exception( "Error parsing pending map" )
437 main.log.error( repr( pendingMap ) )
438
439 def appCheck( self ):
440 """
441 Check App IDs on all nodes
442 """
443 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
Jon Hallb9d381e2018-02-05 12:02:10 -0800444 for i in range( 15 ):
445 # TODO modify retry or add a new version that accepts looking for
446 # a value in a return list instead of needing to match the entire
447 # return value to retry
448 appResults = main.Cluster.command( "appToIDCheck" )
449 appCheck = all( i == main.TRUE for i in appResults )
450 if appCheck:
451 break
452 else:
453 time.sleep( 5 )
454
Jon Hallca319892017-06-15 15:25:22 -0700455 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700456 ctrl = main.Cluster.active( 0 )
Jon Hallb9d381e2018-02-05 12:02:10 -0800457 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.pprint( ctrl.apps() ) ) )
458 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.pprint( ctrl.appIDs() ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700459 return appCheck
460
Jon Halle0f0b342017-04-18 11:43:47 -0700461 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
462 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700463 completedValues = main.Cluster.command( "workQueueTotalCompleted",
464 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700465 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700466 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700467 completedResult = all( completedResults )
468 if not completedResult:
469 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
470 workQueueName, completed, completedValues ) )
471
472 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700473 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
474 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700475 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700476 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700477 inProgressResult = all( inProgressResults )
478 if not inProgressResult:
479 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
480 workQueueName, inProgress, inProgressValues ) )
481
482 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700483 pendingValues = main.Cluster.command( "workQueueTotalPending",
484 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700485 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700486 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700487 pendingResult = all( pendingResults )
488 if not pendingResult:
489 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
490 workQueueName, pending, pendingValues ) )
491 return completedResult and inProgressResult and pendingResult
492
    def assignDevices( self, main ):
        """
        Assign devices to controllers

        Points every Mininet switch at all running ONOS controllers via
        ovs-vsctl and verifies each switch lists every controller, then
        builds the obelisk-topology mac -> dpid mapping table.
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = main.Mininet1.getSwitches().keys()
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        # every switch must list every running controller's tcp endpoint
        for switch in swList:
            response = main.Mininet1.getSwController( switch )
            try:
                main.log.info( str( response ) )
                for ctrl in main.Cluster.runningNodes:
                    if re.search( "tcp:" + ctrl.ipAddress, response ):
                        mastershipCheck = mastershipCheck and main.TRUE
                    else:
                        main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                        "not in the list of controllers " +
                                        switch + " is connecting to." )
                        mastershipCheck = main.FALSE
            except Exception:
                main.log.warn( "Error parsing get-controller response" )
                mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

        # Mappings for attachmentPoints from host mac to deviceID
        # TODO: make the key a dict with deviceIds and port #'s
        # FIXME: topo-HA/obelisk specific mappings:
        # key is mac and value is dpid
        main.topoMappings = {}
        for i in range( 1, 29 ):  # hosts 1 through 28
            # set up correct variables:
            # mac looks like 00:00:00:00:00:NN with NN the host number in hex
            macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
            if i == 1:
                deviceId = "1000".zfill( 16 )
            elif i == 2:
                deviceId = "2000".zfill( 16 )
            elif i == 3:
                deviceId = "3000".zfill( 16 )
            elif i == 4:
                deviceId = "3004".zfill( 16 )
            elif i == 5:
                deviceId = "5000".zfill( 16 )
            elif i == 6:
                deviceId = "6000".zfill( 16 )
            elif i == 7:
                deviceId = "6007".zfill( 16 )
            elif i >= 8 and i <= 17:
                # hosts 8-17 hang off the 3xxx switches
                dpid = '3' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i >= 18 and i <= 27:
                # hosts 18-27 hang off the 6xxx switches
                dpid = '6' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i == 28:
                deviceId = "2800".zfill( 16 )
            main.topoMappings[ macId ] = deviceId
564
Devin Lim58046fa2017-07-05 16:55:00 -0700565 def assignIntents( self, main ):
566 """
567 Assign intents
568 """
569 import time
570 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700571 assert main, "main not defined"
572 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700573 try:
574 main.HAlabels
575 except ( NameError, AttributeError ):
576 main.log.error( "main.HAlabels not defined, setting to []" )
577 main.HAlabels = []
578 try:
579 main.HAdata
580 except ( NameError, AttributeError ):
581 main.log.error( "data not defined, setting to []" )
582 main.HAdata = []
583 main.case( "Adding host Intents" )
584 main.caseExplanation = "Discover hosts by using pingall then " +\
585 "assign predetermined host-to-host intents." +\
586 " After installation, check that the intent" +\
587 " is distributed to all nodes and the state" +\
588 " is INSTALLED"
589
590 # install onos-app-fwd
591 main.step( "Install reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700592 installResults = main.Cluster.next().CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700593 utilities.assert_equals( expect=main.TRUE, actual=installResults,
594 onpass="Install fwd successful",
595 onfail="Install fwd failed" )
596
597 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700598 appCheck = self.appCheck()
599 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700600 onpass="App Ids seem to be correct",
601 onfail="Something is wrong with app Ids" )
602
603 main.step( "Discovering Hosts( Via pingall for now )" )
604 # FIXME: Once we have a host discovery mechanism, use that instead
605 # REACTIVE FWD test
606 pingResult = main.FALSE
607 passMsg = "Reactive Pingall test passed"
608 time1 = time.time()
609 pingResult = main.Mininet1.pingall()
610 time2 = time.time()
611 if not pingResult:
612 main.log.warn( "First pingall failed. Trying again..." )
613 pingResult = main.Mininet1.pingall()
614 passMsg += " on the second try"
615 utilities.assert_equals(
616 expect=main.TRUE,
617 actual=pingResult,
618 onpass=passMsg,
619 onfail="Reactive Pingall failed, " +
620 "one or more ping pairs failed" )
621 main.log.info( "Time for pingall: %2f seconds" %
622 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700623 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700624 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700625 # timeout for fwd flows
626 time.sleep( 11 )
627 # uninstall onos-app-fwd
628 main.step( "Uninstall reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700629 uninstallResult = main.Cluster.next().CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700630 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
631 onpass="Uninstall fwd successful",
632 onfail="Uninstall fwd failed" )
633
634 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700635 appCheck2 = self.appCheck()
636 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700637 onpass="App Ids seem to be correct",
638 onfail="Something is wrong with app Ids" )
639
640 main.step( "Add host intents via cli" )
641 intentIds = []
642 # TODO: move the host numbers to params
643 # Maybe look at all the paths we ping?
644 intentAddResult = True
645 hostResult = main.TRUE
646 for i in range( 8, 18 ):
647 main.log.info( "Adding host intent between h" + str( i ) +
648 " and h" + str( i + 10 ) )
649 host1 = "00:00:00:00:00:" + \
650 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
651 host2 = "00:00:00:00:00:" + \
652 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
653 # NOTE: getHost can return None
Jon Hall0e240372018-05-02 11:21:57 -0700654 host1Dict = main.Cluster.next().CLI.getHost( host1 )
655 host2Dict = main.Cluster.next().CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700656 host1Id = None
657 host2Id = None
658 if host1Dict and host2Dict:
659 host1Id = host1Dict.get( 'id', None )
660 host2Id = host2Dict.get( 'id', None )
661 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700662 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700663 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700664 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700665 if tmpId:
666 main.log.info( "Added intent with id: " + tmpId )
667 intentIds.append( tmpId )
668 else:
669 main.log.error( "addHostIntent returned: " +
670 repr( tmpId ) )
671 else:
672 main.log.error( "Error, getHost() failed for h" + str( i ) +
673 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700674 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700675 try:
Jon Hallca319892017-06-15 15:25:22 -0700676 output = json.dumps( json.loads( hosts ),
677 sort_keys=True,
678 indent=4,
679 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700680 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700681 output = repr( hosts )
682 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700683 hostResult = main.FALSE
684 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
685 onpass="Found a host id for each host",
686 onfail="Error looking up host ids" )
687
688 intentStart = time.time()
Jon Hall0e240372018-05-02 11:21:57 -0700689 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700690 main.log.info( "Submitted intents: " + str( intentIds ) )
691 main.log.info( "Intents in ONOS: " + str( onosIds ) )
692 for intent in intentIds:
693 if intent in onosIds:
694 pass # intent submitted is in onos
695 else:
696 intentAddResult = False
697 if intentAddResult:
698 intentStop = time.time()
699 else:
700 intentStop = None
701 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700702 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700703 intentStates = []
704 installedCheck = True
705 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
706 count = 0
707 try:
708 for intent in json.loads( intents ):
709 state = intent.get( 'state', None )
710 if "INSTALLED" not in state:
711 installedCheck = False
712 intentId = intent.get( 'id', None )
713 intentStates.append( ( intentId, state ) )
714 except ( ValueError, TypeError ):
715 main.log.exception( "Error parsing intents" )
716 # add submitted intents not in the store
717 tmplist = [ i for i, s in intentStates ]
718 missingIntents = False
719 for i in intentIds:
720 if i not in tmplist:
721 intentStates.append( ( i, " - " ) )
722 missingIntents = True
723 intentStates.sort()
724 for i, s in intentStates:
725 count += 1
726 main.log.info( "%-6s%-15s%-15s" %
727 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700728 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700729
730 intentAddResult = bool( intentAddResult and not missingIntents and
731 installedCheck )
732 if not intentAddResult:
733 main.log.error( "Error in pushing host intents to ONOS" )
734
735 main.step( "Intent Anti-Entropy dispersion" )
736 for j in range( 100 ):
737 correct = True
738 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700739 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700740 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700741 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700742 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700743 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700744 str( sorted( onosIds ) ) )
745 if sorted( ids ) != sorted( intentIds ):
746 main.log.warn( "Set of intent IDs doesn't match" )
747 correct = False
748 break
749 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700750 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700751 for intent in intents:
752 if intent[ 'state' ] != "INSTALLED":
753 main.log.warn( "Intent " + intent[ 'id' ] +
754 " is " + intent[ 'state' ] )
755 correct = False
756 break
757 if correct:
758 break
759 else:
760 time.sleep( 1 )
761 if not intentStop:
762 intentStop = time.time()
763 global gossipTime
764 gossipTime = intentStop - intentStart
765 main.log.info( "It took about " + str( gossipTime ) +
766 " seconds for all intents to appear in each node" )
767 append = False
768 title = "Gossip Intents"
769 count = 1
770 while append is False:
771 curTitle = title + str( count )
772 if curTitle not in main.HAlabels:
773 main.HAlabels.append( curTitle )
774 main.HAdata.append( str( gossipTime ) )
775 append = True
776 else:
777 count += 1
778 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700779 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700780 utilities.assert_greater_equals(
781 expect=maxGossipTime, actual=gossipTime,
782 onpass="ECM anti-entropy for intents worked within " +
783 "expected time",
784 onfail="Intent ECM anti-entropy took too long. " +
785 "Expected time:{}, Actual time:{}".format( maxGossipTime,
786 gossipTime ) )
787 if gossipTime <= maxGossipTime:
788 intentAddResult = True
789
Jon Hallca319892017-06-15 15:25:22 -0700790 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700791 if not intentAddResult or "key" in pendingMap:
Devin Lim58046fa2017-07-05 16:55:00 -0700792 installedCheck = True
793 main.log.info( "Sleeping 60 seconds to see if intents are found" )
794 time.sleep( 60 )
Jon Hall0e240372018-05-02 11:21:57 -0700795 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700796 main.log.info( "Submitted intents: " + str( intentIds ) )
797 main.log.info( "Intents in ONOS: " + str( onosIds ) )
798 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700799 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700800 intentStates = []
801 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
802 count = 0
803 try:
804 for intent in json.loads( intents ):
805 # Iter through intents of a node
806 state = intent.get( 'state', None )
807 if "INSTALLED" not in state:
808 installedCheck = False
809 intentId = intent.get( 'id', None )
810 intentStates.append( ( intentId, state ) )
811 except ( ValueError, TypeError ):
812 main.log.exception( "Error parsing intents" )
813 # add submitted intents not in the store
814 tmplist = [ i for i, s in intentStates ]
815 for i in intentIds:
816 if i not in tmplist:
817 intentStates.append( ( i, " - " ) )
818 intentStates.sort()
819 for i, s in intentStates:
820 count += 1
821 main.log.info( "%-6s%-15s%-15s" %
822 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700823 self.topicsCheck( [ "org.onosproject.election" ] )
824 self.partitionsCheck()
825 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700826
Jon Hallca319892017-06-15 15:25:22 -0700827 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700828 """
829 Ping across added host intents
830 """
831 import json
832 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700833 assert main, "main not defined"
834 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700835 main.case( "Verify connectivity by sending traffic across Intents" )
836 main.caseExplanation = "Ping across added host intents to check " +\
837 "functionality and check the state of " +\
838 "the intent"
839
Jon Hallca319892017-06-15 15:25:22 -0700840 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700841 main.step( "Check Intent state" )
842 installedCheck = False
843 loopCount = 0
Jon Hall5d5876e2017-11-30 09:33:16 -0800844 while not installedCheck and loopCount < 90:
Devin Lim58046fa2017-07-05 16:55:00 -0700845 installedCheck = True
846 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700847 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700848 intentStates = []
849 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
850 count = 0
851 # Iter through intents of a node
852 try:
853 for intent in json.loads( intents ):
854 state = intent.get( 'state', None )
855 if "INSTALLED" not in state:
856 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700857 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700858 intentId = intent.get( 'id', None )
859 intentStates.append( ( intentId, state ) )
860 except ( ValueError, TypeError ):
861 main.log.exception( "Error parsing intents." )
862 # Print states
863 intentStates.sort()
864 for i, s in intentStates:
865 count += 1
866 main.log.info( "%-6s%-15s%-15s" %
867 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700868 if not installedCheck:
869 time.sleep( 1 )
870 loopCount += 1
871 utilities.assert_equals( expect=True, actual=installedCheck,
872 onpass="Intents are all INSTALLED",
873 onfail="Intents are not all in " +
874 "INSTALLED state" )
875
876 main.step( "Ping across added host intents" )
877 PingResult = main.TRUE
878 for i in range( 8, 18 ):
879 ping = main.Mininet1.pingHost( src="h" + str( i ),
880 target="h" + str( i + 10 ) )
881 PingResult = PingResult and ping
882 if ping == main.FALSE:
883 main.log.warn( "Ping failed between h" + str( i ) +
884 " and h" + str( i + 10 ) )
885 elif ping == main.TRUE:
886 main.log.info( "Ping test passed!" )
887 # Don't set PingResult or you'd override failures
888 if PingResult == main.FALSE:
889 main.log.error(
890 "Intents have not been installed correctly, pings failed." )
891 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700892 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700893 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700894 output = json.dumps( json.loads( tmpIntents ),
895 sort_keys=True,
896 indent=4,
897 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700898 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700899 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700900 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700901 utilities.assert_equals(
902 expect=main.TRUE,
903 actual=PingResult,
904 onpass="Intents have been installed correctly and pings work",
905 onfail="Intents have not been installed correctly, pings failed." )
906
907 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700908 topicsCheck = self.topicsCheck()
909 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700910 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700911 onfail="Some topics were lost" )
912 self.partitionsCheck()
913 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700914
915 if not installedCheck:
916 main.log.info( "Waiting 60 seconds to see if the state of " +
917 "intents change" )
918 time.sleep( 60 )
919 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700920 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700921 intentStates = []
922 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
923 count = 0
924 # Iter through intents of a node
925 try:
926 for intent in json.loads( intents ):
927 state = intent.get( 'state', None )
928 if "INSTALLED" not in state:
929 installedCheck = False
930 intentId = intent.get( 'id', None )
931 intentStates.append( ( intentId, state ) )
932 except ( ValueError, TypeError ):
933 main.log.exception( "Error parsing intents." )
934 intentStates.sort()
935 for i, s in intentStates:
936 count += 1
937 main.log.info( "%-6s%-15s%-15s" %
938 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700939 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700940
Devin Lim58046fa2017-07-05 16:55:00 -0700941 main.step( "Wait a minute then ping again" )
942 # the wait is above
943 PingResult = main.TRUE
944 for i in range( 8, 18 ):
945 ping = main.Mininet1.pingHost( src="h" + str( i ),
946 target="h" + str( i + 10 ) )
947 PingResult = PingResult and ping
948 if ping == main.FALSE:
949 main.log.warn( "Ping failed between h" + str( i ) +
950 " and h" + str( i + 10 ) )
951 elif ping == main.TRUE:
952 main.log.info( "Ping test passed!" )
953 # Don't set PingResult or you'd override failures
954 if PingResult == main.FALSE:
955 main.log.error(
956 "Intents have not been installed correctly, pings failed." )
957 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700958 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700959 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700960 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700961 main.log.warn( json.dumps( json.loads( tmpIntents ),
962 sort_keys=True,
963 indent=4,
964 separators=( ',', ': ' ) ) )
965 except ( ValueError, TypeError ):
966 main.log.warn( repr( tmpIntents ) )
967 utilities.assert_equals(
968 expect=main.TRUE,
969 actual=PingResult,
970 onpass="Intents have been installed correctly and pings work",
971 onfail="Intents have not been installed correctly, pings failed." )
972
Devin Lim142b5342017-07-20 15:22:39 -0700973 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700974 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700975 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700976 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700977 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700978 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700979 actual=rolesNotNull,
980 onpass="Each device has a master",
981 onfail="Some devices don't have a master assigned" )
982
Devin Lim142b5342017-07-20 15:22:39 -0700983 def checkTheRole( self ):
984 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700985 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700986 consistentMastership = True
987 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700988 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700989 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700990 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700991 main.log.error( "Error in getting " + node + " roles" )
992 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -0700993 repr( ONOSMastership[ i ] ) )
994 rolesResults = False
995 utilities.assert_equals(
996 expect=True,
997 actual=rolesResults,
998 onpass="No error in reading roles output",
999 onfail="Error in reading roles from ONOS" )
1000
1001 main.step( "Check for consistency in roles from each controller" )
1002 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1003 main.log.info(
1004 "Switch roles are consistent across all ONOS nodes" )
1005 else:
1006 consistentMastership = False
1007 utilities.assert_equals(
1008 expect=True,
1009 actual=consistentMastership,
1010 onpass="Switch roles are consistent across all ONOS nodes",
1011 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -07001012 return ONOSMastership, rolesResults, consistentMastership
1013
1014 def checkingIntents( self ):
1015 main.step( "Get the intents from each controller" )
1016 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1017 intentsResults = True
1018 for i in range( len( ONOSIntents ) ):
1019 node = str( main.Cluster.active( i ) )
1020 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1021 main.log.error( "Error in getting " + node + " intents" )
1022 main.log.warn( node + " intents response: " +
1023 repr( ONOSIntents[ i ] ) )
1024 intentsResults = False
1025 utilities.assert_equals(
1026 expect=True,
1027 actual=intentsResults,
1028 onpass="No error in reading intents output",
1029 onfail="Error in reading intents from ONOS" )
1030 return ONOSIntents, intentsResults
1031
1032 def readingState( self, main ):
1033 """
1034 Reading state of ONOS
1035 """
1036 import json
Devin Lim142b5342017-07-20 15:22:39 -07001037 assert main, "main not defined"
1038 assert utilities.assert_equals, "utilities.assert_equals not defined"
1039 try:
1040 from tests.dependencies.topology import Topology
1041 except ImportError:
1042 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001043 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001044 try:
1045 main.topoRelated
1046 except ( NameError, AttributeError ):
1047 main.topoRelated = Topology()
1048 main.case( "Setting up and gathering data for current state" )
1049 # The general idea for this test case is to pull the state of
1050 # ( intents,flows, topology,... ) from each ONOS node
1051 # We can then compare them with each other and also with past states
1052
1053 global mastershipState
1054 mastershipState = '[]'
1055
1056 self.checkRoleNotNull()
1057
1058 main.step( "Get the Mastership of each switch from each controller" )
1059 mastershipCheck = main.FALSE
1060
1061 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001062
1063 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001064 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001065 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001066 try:
1067 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001068 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001069 json.dumps(
1070 json.loads( ONOSMastership[ i ] ),
1071 sort_keys=True,
1072 indent=4,
1073 separators=( ',', ': ' ) ) )
1074 except ( ValueError, TypeError ):
1075 main.log.warn( repr( ONOSMastership[ i ] ) )
1076 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001077 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001078 mastershipState = ONOSMastership[ 0 ]
1079
Devin Lim58046fa2017-07-05 16:55:00 -07001080 global intentState
1081 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001082 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001083 intentCheck = main.FALSE
1084 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001085
Devin Lim58046fa2017-07-05 16:55:00 -07001086 main.step( "Check for consistency in Intents from each controller" )
1087 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1088 main.log.info( "Intents are consistent across all ONOS " +
1089 "nodes" )
1090 else:
1091 consistentIntents = False
1092 main.log.error( "Intents not consistent" )
1093 utilities.assert_equals(
1094 expect=True,
1095 actual=consistentIntents,
1096 onpass="Intents are consistent across all ONOS nodes",
1097 onfail="ONOS nodes have different views of intents" )
1098
1099 if intentsResults:
1100 # Try to make it easy to figure out what is happening
1101 #
1102 # Intent ONOS1 ONOS2 ...
1103 # 0x01 INSTALLED INSTALLING
1104 # ... ... ...
1105 # ... ... ...
1106 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001107 for ctrl in main.Cluster.active():
1108 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001109 main.log.warn( title )
1110 # get all intent keys in the cluster
1111 keys = []
1112 try:
1113 # Get the set of all intent keys
1114 for nodeStr in ONOSIntents:
1115 node = json.loads( nodeStr )
1116 for intent in node:
1117 keys.append( intent.get( 'id' ) )
1118 keys = set( keys )
1119 # For each intent key, print the state on each node
1120 for key in keys:
1121 row = "%-13s" % key
1122 for nodeStr in ONOSIntents:
1123 node = json.loads( nodeStr )
1124 for intent in node:
1125 if intent.get( 'id', "Error" ) == key:
1126 row += "%-15s" % intent.get( 'state' )
1127 main.log.warn( row )
1128 # End of intent state table
1129 except ValueError as e:
1130 main.log.exception( e )
1131 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1132
1133 if intentsResults and not consistentIntents:
1134 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001135 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001136 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1137 sort_keys=True,
1138 indent=4,
1139 separators=( ',', ': ' ) ) )
1140 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001141 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001142 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001143 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001144 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1145 sort_keys=True,
1146 indent=4,
1147 separators=( ',', ': ' ) ) )
1148 else:
Jon Hallca319892017-06-15 15:25:22 -07001149 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001150 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001151 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001152 intentState = ONOSIntents[ 0 ]
1153
1154 main.step( "Get the flows from each controller" )
1155 global flowState
1156 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001157 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001158 ONOSFlowsJson = []
1159 flowCheck = main.FALSE
1160 consistentFlows = True
1161 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001162 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001163 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001164 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001165 main.log.error( "Error in getting " + node + " flows" )
1166 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001167 repr( ONOSFlows[ i ] ) )
1168 flowsResults = False
1169 ONOSFlowsJson.append( None )
1170 else:
1171 try:
1172 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1173 except ( ValueError, TypeError ):
1174 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001175 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001176 " response as json." )
1177 main.log.error( repr( ONOSFlows[ i ] ) )
1178 ONOSFlowsJson.append( None )
1179 flowsResults = False
1180 utilities.assert_equals(
1181 expect=True,
1182 actual=flowsResults,
1183 onpass="No error in reading flows output",
1184 onfail="Error in reading flows from ONOS" )
1185
1186 main.step( "Check for consistency in Flows from each controller" )
1187 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1188 if all( tmp ):
1189 main.log.info( "Flow count is consistent across all ONOS nodes" )
1190 else:
1191 consistentFlows = False
1192 utilities.assert_equals(
1193 expect=True,
1194 actual=consistentFlows,
1195 onpass="The flow count is consistent across all ONOS nodes",
1196 onfail="ONOS nodes have different flow counts" )
1197
1198 if flowsResults and not consistentFlows:
1199 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001200 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001201 try:
1202 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001203 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001204 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1205 indent=4, separators=( ',', ': ' ) ) )
1206 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001207 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001208 repr( ONOSFlows[ i ] ) )
1209 elif flowsResults and consistentFlows:
1210 flowCheck = main.TRUE
1211 flowState = ONOSFlows[ 0 ]
1212
1213 main.step( "Get the OF Table entries" )
1214 global flows
Jon Hallab611372018-02-21 15:26:05 -08001215 flows = {}
1216 for swName, swDetails in main.Mininet1.getSwitches().items():
1217 main.log.debug( repr( swName ) + repr( swDetails ) )
1218 flows[ swName ] = main.Mininet1.getFlowTable( swName, version="1.3", debug=False )
Devin Lim58046fa2017-07-05 16:55:00 -07001219 if flowCheck == main.FALSE:
1220 for table in flows:
1221 main.log.warn( table )
1222 # TODO: Compare switch flow tables with ONOS flow tables
1223
1224 main.step( "Start continuous pings" )
Jon Hallab611372018-02-21 15:26:05 -08001225 if main.params.get( 'PING', False ):
1226 # TODO: Make this more dynamic and less hardcoded, ie, # or ping pairs
1227 main.Mininet2.pingLong(
1228 src=main.params[ 'PING' ][ 'source1' ],
1229 target=main.params[ 'PING' ][ 'target1' ],
1230 pingTime=500 )
1231 main.Mininet2.pingLong(
1232 src=main.params[ 'PING' ][ 'source2' ],
1233 target=main.params[ 'PING' ][ 'target2' ],
1234 pingTime=500 )
1235 main.Mininet2.pingLong(
1236 src=main.params[ 'PING' ][ 'source3' ],
1237 target=main.params[ 'PING' ][ 'target3' ],
1238 pingTime=500 )
1239 main.Mininet2.pingLong(
1240 src=main.params[ 'PING' ][ 'source4' ],
1241 target=main.params[ 'PING' ][ 'target4' ],
1242 pingTime=500 )
1243 main.Mininet2.pingLong(
1244 src=main.params[ 'PING' ][ 'source5' ],
1245 target=main.params[ 'PING' ][ 'target5' ],
1246 pingTime=500 )
1247 main.Mininet2.pingLong(
1248 src=main.params[ 'PING' ][ 'source6' ],
1249 target=main.params[ 'PING' ][ 'target6' ],
1250 pingTime=500 )
1251 main.Mininet2.pingLong(
1252 src=main.params[ 'PING' ][ 'source7' ],
1253 target=main.params[ 'PING' ][ 'target7' ],
1254 pingTime=500 )
1255 main.Mininet2.pingLong(
1256 src=main.params[ 'PING' ][ 'source8' ],
1257 target=main.params[ 'PING' ][ 'target8' ],
1258 pingTime=500 )
1259 main.Mininet2.pingLong(
1260 src=main.params[ 'PING' ][ 'source9' ],
1261 target=main.params[ 'PING' ][ 'target9' ],
1262 pingTime=500 )
1263 main.Mininet2.pingLong(
1264 src=main.params[ 'PING' ][ 'source10' ],
1265 target=main.params[ 'PING' ][ 'target10' ],
1266 pingTime=500 )
Devin Lim58046fa2017-07-05 16:55:00 -07001267
1268 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001269 devices = main.topoRelated.getAll( "devices" )
1270 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1271 ports = main.topoRelated.getAll( "ports" )
1272 links = main.topoRelated.getAll( "links" )
1273 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001274 # Compare json objects for hosts and dataplane clusters
1275
1276 # hosts
1277 main.step( "Host view is consistent across ONOS nodes" )
1278 consistentHostsResult = main.TRUE
1279 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001280 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001281 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1282 if hosts[ controller ] == hosts[ 0 ]:
1283 continue
1284 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001285 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001286 controllerStr +
1287 " is inconsistent with ONOS1" )
1288 main.log.warn( repr( hosts[ controller ] ) )
1289 consistentHostsResult = main.FALSE
1290
1291 else:
Jon Hallca319892017-06-15 15:25:22 -07001292 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001293 controllerStr )
1294 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001295 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001296 " hosts response: " +
1297 repr( hosts[ controller ] ) )
1298 utilities.assert_equals(
1299 expect=main.TRUE,
1300 actual=consistentHostsResult,
1301 onpass="Hosts view is consistent across all ONOS nodes",
1302 onfail="ONOS nodes have different views of hosts" )
1303
1304 main.step( "Each host has an IP address" )
1305 ipResult = main.TRUE
1306 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001307 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001308 if hosts[ controller ]:
1309 for host in hosts[ controller ]:
1310 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001311 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001312 controllerStr + ": " + str( host ) )
1313 ipResult = main.FALSE
1314 utilities.assert_equals(
1315 expect=main.TRUE,
1316 actual=ipResult,
1317 onpass="The ips of the hosts aren't empty",
1318 onfail="The ip of at least one host is missing" )
1319
1320 # Strongly connected clusters of devices
1321 main.step( "Cluster view is consistent across ONOS nodes" )
1322 consistentClustersResult = main.TRUE
1323 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001324 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001325 if "Error" not in clusters[ controller ]:
1326 if clusters[ controller ] == clusters[ 0 ]:
1327 continue
1328 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001329 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001330 " is inconsistent with ONOS1" )
1331 consistentClustersResult = main.FALSE
1332
1333 else:
1334 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001335 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001336 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001337 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001338 " clusters response: " +
1339 repr( clusters[ controller ] ) )
1340 utilities.assert_equals(
1341 expect=main.TRUE,
1342 actual=consistentClustersResult,
1343 onpass="Clusters view is consistent across all ONOS nodes",
1344 onfail="ONOS nodes have different views of clusters" )
1345 if not consistentClustersResult:
1346 main.log.debug( clusters )
1347
1348 # there should always only be one cluster
1349 main.step( "Cluster view correct across ONOS nodes" )
1350 try:
1351 numClusters = len( json.loads( clusters[ 0 ] ) )
1352 except ( ValueError, TypeError ):
1353 main.log.exception( "Error parsing clusters[0]: " +
1354 repr( clusters[ 0 ] ) )
1355 numClusters = "ERROR"
1356 utilities.assert_equals(
1357 expect=1,
1358 actual=numClusters,
1359 onpass="ONOS shows 1 SCC",
1360 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1361
1362 main.step( "Comparing ONOS topology to MN" )
1363 devicesResults = main.TRUE
1364 linksResults = main.TRUE
1365 hostsResults = main.TRUE
1366 mnSwitches = main.Mininet1.getSwitches()
1367 mnLinks = main.Mininet1.getLinks()
1368 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001369 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001370 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001371 currentDevicesResult = main.topoRelated.compareDevicePort(
1372 main.Mininet1, controller,
1373 mnSwitches, devices, ports )
1374 utilities.assert_equals( expect=main.TRUE,
1375 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001376 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001377 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001378 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001379 " Switches view is incorrect" )
1380
1381 currentLinksResult = main.topoRelated.compareBase( links, controller,
1382 main.Mininet1.compareLinks,
1383 [ mnSwitches, mnLinks ] )
1384 utilities.assert_equals( expect=main.TRUE,
1385 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001386 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001387 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001388 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001389 " links view is incorrect" )
1390
1391 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1392 currentHostsResult = main.Mininet1.compareHosts(
1393 mnHosts,
1394 hosts[ controller ] )
1395 else:
1396 currentHostsResult = main.FALSE
1397 utilities.assert_equals( expect=main.TRUE,
1398 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001399 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001400 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001401 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001402 " hosts don't match Mininet" )
1403
1404 devicesResults = devicesResults and currentDevicesResult
1405 linksResults = linksResults and currentLinksResult
1406 hostsResults = hostsResults and currentHostsResult
1407
1408 main.step( "Device information is correct" )
1409 utilities.assert_equals(
1410 expect=main.TRUE,
1411 actual=devicesResults,
1412 onpass="Device information is correct",
1413 onfail="Device information is incorrect" )
1414
1415 main.step( "Links are correct" )
1416 utilities.assert_equals(
1417 expect=main.TRUE,
1418 actual=linksResults,
1419 onpass="Link are correct",
1420 onfail="Links are incorrect" )
1421
1422 main.step( "Hosts are correct" )
1423 utilities.assert_equals(
1424 expect=main.TRUE,
1425 actual=hostsResults,
1426 onpass="Hosts are correct",
1427 onfail="Hosts are incorrect" )
1428
1429 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001430 """
1431 Check for basic functionality with distributed primitives
1432 """
Jon Halle0f0b342017-04-18 11:43:47 -07001433 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001434 try:
1435 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001436 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001437 assert main.pCounterName, "main.pCounterName not defined"
1438 assert main.onosSetName, "main.onosSetName not defined"
1439 # NOTE: assert fails if value is 0/None/Empty/False
1440 try:
1441 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001442 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001443 main.log.error( "main.pCounterValue not defined, setting to 0" )
1444 main.pCounterValue = 0
1445 try:
1446 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001447 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001448 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001449 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001450 # Variables for the distributed primitives tests. These are local only
1451 addValue = "a"
1452 addAllValue = "a b c d e f"
1453 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001454 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001455 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001456 workQueueName = "TestON-Queue"
1457 workQueueCompleted = 0
1458 workQueueInProgress = 0
1459 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001460
1461 description = "Check for basic functionality with distributed " +\
1462 "primitives"
1463 main.case( description )
1464 main.caseExplanation = "Test the methods of the distributed " +\
1465 "primitives (counters and sets) throught the cli"
1466 # DISTRIBUTED ATOMIC COUNTERS
1467 # Partitioned counters
1468 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001469 pCounters = main.Cluster.command( "counterTestAddAndGet",
1470 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001471 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001472 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001473 main.pCounterValue += 1
1474 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001475 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001476 pCounterResults = True
1477 for i in addedPValues:
1478 tmpResult = i in pCounters
1479 pCounterResults = pCounterResults and tmpResult
1480 if not tmpResult:
1481 main.log.error( str( i ) + " is not in partitioned "
1482 "counter incremented results" )
1483 utilities.assert_equals( expect=True,
1484 actual=pCounterResults,
1485 onpass="Default counter incremented",
1486 onfail="Error incrementing default" +
1487 " counter" )
1488
1489 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001490 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1491 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001492 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001493 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001494 addedPValues.append( main.pCounterValue )
1495 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001496 # Check that counter incremented numController times
1497 pCounterResults = True
1498 for i in addedPValues:
1499 tmpResult = i in pCounters
1500 pCounterResults = pCounterResults and tmpResult
1501 if not tmpResult:
1502 main.log.error( str( i ) + " is not in partitioned "
1503 "counter incremented results" )
1504 utilities.assert_equals( expect=True,
1505 actual=pCounterResults,
1506 onpass="Default counter incremented",
1507 onfail="Error incrementing default" +
1508 " counter" )
1509
1510 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001511 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001512 utilities.assert_equals( expect=main.TRUE,
1513 actual=incrementCheck,
1514 onpass="Added counters are correct",
1515 onfail="Added counters are incorrect" )
1516
1517 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001518 pCounters = main.Cluster.command( "counterTestAddAndGet",
1519 args=[ main.pCounterName ],
1520 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001521 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001522 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001523 main.pCounterValue += -8
1524 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001525 # Check that counter incremented numController times
1526 pCounterResults = True
1527 for i in addedPValues:
1528 tmpResult = i in pCounters
1529 pCounterResults = pCounterResults and tmpResult
1530 if not tmpResult:
1531 main.log.error( str( i ) + " is not in partitioned "
1532 "counter incremented results" )
1533 utilities.assert_equals( expect=True,
1534 actual=pCounterResults,
1535 onpass="Default counter incremented",
1536 onfail="Error incrementing default" +
1537 " counter" )
1538
1539 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001540 pCounters = main.Cluster.command( "counterTestAddAndGet",
1541 args=[ main.pCounterName ],
1542 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001543 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001544 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001545 main.pCounterValue += 5
1546 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001547
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001548 # Check that counter incremented numController times
1549 pCounterResults = True
1550 for i in addedPValues:
1551 tmpResult = i in pCounters
1552 pCounterResults = pCounterResults and tmpResult
1553 if not tmpResult:
1554 main.log.error( str( i ) + " is not in partitioned "
1555 "counter incremented results" )
1556 utilities.assert_equals( expect=True,
1557 actual=pCounterResults,
1558 onpass="Default counter incremented",
1559 onfail="Error incrementing default" +
1560 " counter" )
1561
1562 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001563 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1564 args=[ main.pCounterName ],
1565 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001566 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001567 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001568 addedPValues.append( main.pCounterValue )
1569 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001570 # Check that counter incremented numController times
1571 pCounterResults = True
1572 for i in addedPValues:
1573 tmpResult = i in pCounters
1574 pCounterResults = pCounterResults and tmpResult
1575 if not tmpResult:
1576 main.log.error( str( i ) + " is not in partitioned "
1577 "counter incremented results" )
1578 utilities.assert_equals( expect=True,
1579 actual=pCounterResults,
1580 onpass="Default counter incremented",
1581 onfail="Error incrementing default" +
1582 " counter" )
1583
1584 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001585 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001586 utilities.assert_equals( expect=main.TRUE,
1587 actual=incrementCheck,
1588 onpass="Added counters are correct",
1589 onfail="Added counters are incorrect" )
1590
1591 # DISTRIBUTED SETS
1592 main.step( "Distributed Set get" )
1593 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001594 getResponses = main.Cluster.command( "setTestGet",
1595 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001596 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001597 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001598 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001599 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001600 current = set( getResponses[ i ] )
1601 if len( current ) == len( getResponses[ i ] ):
1602 # no repeats
1603 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001604 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001605 " has incorrect view" +
1606 " of set " + main.onosSetName + ":\n" +
1607 str( getResponses[ i ] ) )
1608 main.log.debug( "Expected: " + str( main.onosSet ) )
1609 main.log.debug( "Actual: " + str( current ) )
1610 getResults = main.FALSE
1611 else:
1612 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001613 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001614 " has repeat elements in" +
1615 " set " + main.onosSetName + ":\n" +
1616 str( getResponses[ i ] ) )
1617 getResults = main.FALSE
1618 elif getResponses[ i ] == main.ERROR:
1619 getResults = main.FALSE
1620 utilities.assert_equals( expect=main.TRUE,
1621 actual=getResults,
1622 onpass="Set elements are correct",
1623 onfail="Set elements are incorrect" )
1624
1625 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001626 sizeResponses = main.Cluster.command( "setTestSize",
1627 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001628 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001629 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001630 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001631 if size != sizeResponses[ i ]:
1632 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001633 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001634 " expected a size of " + str( size ) +
1635 " for set " + main.onosSetName +
1636 " but got " + str( sizeResponses[ i ] ) )
1637 utilities.assert_equals( expect=main.TRUE,
1638 actual=sizeResults,
1639 onpass="Set sizes are correct",
1640 onfail="Set sizes are incorrect" )
1641
1642 main.step( "Distributed Set add()" )
1643 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001644 addResponses = main.Cluster.command( "setTestAdd",
1645 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001646 # main.TRUE = successfully changed the set
1647 # main.FALSE = action resulted in no change in set
1648 # main.ERROR - Some error in executing the function
1649 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001650 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001651 if addResponses[ i ] == main.TRUE:
1652 # All is well
1653 pass
1654 elif addResponses[ i ] == main.FALSE:
1655 # Already in set, probably fine
1656 pass
1657 elif addResponses[ i ] == main.ERROR:
1658 # Error in execution
1659 addResults = main.FALSE
1660 else:
1661 # unexpected result
1662 addResults = main.FALSE
1663 if addResults != main.TRUE:
1664 main.log.error( "Error executing set add" )
1665
1666 # Check if set is still correct
1667 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001668 getResponses = main.Cluster.command( "setTestGet",
1669 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001670 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001671 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001672 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001673 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001674 current = set( getResponses[ i ] )
1675 if len( current ) == len( getResponses[ i ] ):
1676 # no repeats
1677 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001678 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001679 " of set " + main.onosSetName + ":\n" +
1680 str( getResponses[ i ] ) )
1681 main.log.debug( "Expected: " + str( main.onosSet ) )
1682 main.log.debug( "Actual: " + str( current ) )
1683 getResults = main.FALSE
1684 else:
1685 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001686 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001687 " set " + main.onosSetName + ":\n" +
1688 str( getResponses[ i ] ) )
1689 getResults = main.FALSE
1690 elif getResponses[ i ] == main.ERROR:
1691 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001692 sizeResponses = main.Cluster.command( "setTestSize",
1693 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001694 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001695 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001696 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001697 if size != sizeResponses[ i ]:
1698 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001699 main.log.error( node + " expected a size of " +
1700 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001701 " but got " + str( sizeResponses[ i ] ) )
1702 addResults = addResults and getResults and sizeResults
1703 utilities.assert_equals( expect=main.TRUE,
1704 actual=addResults,
1705 onpass="Set add correct",
1706 onfail="Set add was incorrect" )
1707
1708 main.step( "Distributed Set addAll()" )
1709 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001710 addResponses = main.Cluster.command( "setTestAdd",
1711 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001712 # main.TRUE = successfully changed the set
1713 # main.FALSE = action resulted in no change in set
1714 # main.ERROR - Some error in executing the function
1715 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001716 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001717 if addResponses[ i ] == main.TRUE:
1718 # All is well
1719 pass
1720 elif addResponses[ i ] == main.FALSE:
1721 # Already in set, probably fine
1722 pass
1723 elif addResponses[ i ] == main.ERROR:
1724 # Error in execution
1725 addAllResults = main.FALSE
1726 else:
1727 # unexpected result
1728 addAllResults = main.FALSE
1729 if addAllResults != main.TRUE:
1730 main.log.error( "Error executing set addAll" )
1731
1732 # Check if set is still correct
1733 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001734 getResponses = main.Cluster.command( "setTestGet",
1735 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001736 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001737 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001738 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001739 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001740 current = set( getResponses[ i ] )
1741 if len( current ) == len( getResponses[ i ] ):
1742 # no repeats
1743 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001744 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001745 " of set " + main.onosSetName + ":\n" +
1746 str( getResponses[ i ] ) )
1747 main.log.debug( "Expected: " + str( main.onosSet ) )
1748 main.log.debug( "Actual: " + str( current ) )
1749 getResults = main.FALSE
1750 else:
1751 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001752 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001753 " set " + main.onosSetName + ":\n" +
1754 str( getResponses[ i ] ) )
1755 getResults = main.FALSE
1756 elif getResponses[ i ] == main.ERROR:
1757 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001758 sizeResponses = main.Cluster.command( "setTestSize",
1759 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001760 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001761 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001762 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001763 if size != sizeResponses[ i ]:
1764 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001765 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001766 " for set " + main.onosSetName +
1767 " but got " + str( sizeResponses[ i ] ) )
1768 addAllResults = addAllResults and getResults and sizeResults
1769 utilities.assert_equals( expect=main.TRUE,
1770 actual=addAllResults,
1771 onpass="Set addAll correct",
1772 onfail="Set addAll was incorrect" )
1773
1774 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001775 containsResponses = main.Cluster.command( "setTestGet",
1776 args=[ main.onosSetName ],
1777 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001778 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001779 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001780 if containsResponses[ i ] == main.ERROR:
1781 containsResults = main.FALSE
1782 else:
1783 containsResults = containsResults and\
1784 containsResponses[ i ][ 1 ]
1785 utilities.assert_equals( expect=main.TRUE,
1786 actual=containsResults,
1787 onpass="Set contains is functional",
1788 onfail="Set contains failed" )
1789
1790 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001791 containsAllResponses = main.Cluster.command( "setTestGet",
1792 args=[ main.onosSetName ],
1793 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001794 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001795 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001796 if containsResponses[ i ] == main.ERROR:
1797 containsResults = main.FALSE
1798 else:
1799 containsResults = containsResults and\
1800 containsResponses[ i ][ 1 ]
1801 utilities.assert_equals( expect=main.TRUE,
1802 actual=containsAllResults,
1803 onpass="Set containsAll is functional",
1804 onfail="Set containsAll failed" )
1805
1806 main.step( "Distributed Set remove()" )
1807 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001808 removeResponses = main.Cluster.command( "setTestRemove",
1809 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001810 # main.TRUE = successfully changed the set
1811 # main.FALSE = action resulted in no change in set
1812 # main.ERROR - Some error in executing the function
1813 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001814 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001815 if removeResponses[ i ] == main.TRUE:
1816 # All is well
1817 pass
1818 elif removeResponses[ i ] == main.FALSE:
1819 # not in set, probably fine
1820 pass
1821 elif removeResponses[ i ] == main.ERROR:
1822 # Error in execution
1823 removeResults = main.FALSE
1824 else:
1825 # unexpected result
1826 removeResults = main.FALSE
1827 if removeResults != main.TRUE:
1828 main.log.error( "Error executing set remove" )
1829
1830 # Check if set is still correct
1831 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001832 getResponses = main.Cluster.command( "setTestGet",
1833 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001834 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001835 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001836 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001837 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001838 current = set( getResponses[ i ] )
1839 if len( current ) == len( getResponses[ i ] ):
1840 # no repeats
1841 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001842 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001843 " of set " + main.onosSetName + ":\n" +
1844 str( getResponses[ i ] ) )
1845 main.log.debug( "Expected: " + str( main.onosSet ) )
1846 main.log.debug( "Actual: " + str( current ) )
1847 getResults = main.FALSE
1848 else:
1849 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001850 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001851 " set " + main.onosSetName + ":\n" +
1852 str( getResponses[ i ] ) )
1853 getResults = main.FALSE
1854 elif getResponses[ i ] == main.ERROR:
1855 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001856 sizeResponses = main.Cluster.command( "setTestSize",
1857 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001858 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001859 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001860 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001861 if size != sizeResponses[ i ]:
1862 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001863 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001864 " for set " + main.onosSetName +
1865 " but got " + str( sizeResponses[ i ] ) )
1866 removeResults = removeResults and getResults and sizeResults
1867 utilities.assert_equals( expect=main.TRUE,
1868 actual=removeResults,
1869 onpass="Set remove correct",
1870 onfail="Set remove was incorrect" )
1871
1872 main.step( "Distributed Set removeAll()" )
1873 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001874 removeAllResponses = main.Cluster.command( "setTestRemove",
1875 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001876 # main.TRUE = successfully changed the set
1877 # main.FALSE = action resulted in no change in set
1878 # main.ERROR - Some error in executing the function
1879 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001880 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001881 if removeAllResponses[ i ] == main.TRUE:
1882 # All is well
1883 pass
1884 elif removeAllResponses[ i ] == main.FALSE:
1885 # not in set, probably fine
1886 pass
1887 elif removeAllResponses[ i ] == main.ERROR:
1888 # Error in execution
1889 removeAllResults = main.FALSE
1890 else:
1891 # unexpected result
1892 removeAllResults = main.FALSE
1893 if removeAllResults != main.TRUE:
1894 main.log.error( "Error executing set removeAll" )
1895
1896 # Check if set is still correct
1897 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001898 getResponses = main.Cluster.command( "setTestGet",
1899 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001900 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001901 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001902 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001903 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001904 current = set( getResponses[ i ] )
1905 if len( current ) == len( getResponses[ i ] ):
1906 # no repeats
1907 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001908 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001909 " of set " + main.onosSetName + ":\n" +
1910 str( getResponses[ i ] ) )
1911 main.log.debug( "Expected: " + str( main.onosSet ) )
1912 main.log.debug( "Actual: " + str( current ) )
1913 getResults = main.FALSE
1914 else:
1915 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001916 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001917 " set " + main.onosSetName + ":\n" +
1918 str( getResponses[ i ] ) )
1919 getResults = main.FALSE
1920 elif getResponses[ i ] == main.ERROR:
1921 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001922 sizeResponses = main.Cluster.command( "setTestSize",
1923 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001924 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001925 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001926 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001927 if size != sizeResponses[ i ]:
1928 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001929 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001930 " for set " + main.onosSetName +
1931 " but got " + str( sizeResponses[ i ] ) )
1932 removeAllResults = removeAllResults and getResults and sizeResults
1933 utilities.assert_equals( expect=main.TRUE,
1934 actual=removeAllResults,
1935 onpass="Set removeAll correct",
1936 onfail="Set removeAll was incorrect" )
1937
1938 main.step( "Distributed Set addAll()" )
1939 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001940 addResponses = main.Cluster.command( "setTestAdd",
1941 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001942 # main.TRUE = successfully changed the set
1943 # main.FALSE = action resulted in no change in set
1944 # main.ERROR - Some error in executing the function
1945 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001946 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001947 if addResponses[ i ] == main.TRUE:
1948 # All is well
1949 pass
1950 elif addResponses[ i ] == main.FALSE:
1951 # Already in set, probably fine
1952 pass
1953 elif addResponses[ i ] == main.ERROR:
1954 # Error in execution
1955 addAllResults = main.FALSE
1956 else:
1957 # unexpected result
1958 addAllResults = main.FALSE
1959 if addAllResults != main.TRUE:
1960 main.log.error( "Error executing set addAll" )
1961
1962 # Check if set is still correct
1963 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001964 getResponses = main.Cluster.command( "setTestGet",
1965 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001966 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001967 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001968 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001969 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001970 current = set( getResponses[ i ] )
1971 if len( current ) == len( getResponses[ i ] ):
1972 # no repeats
1973 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001974 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001975 " of set " + main.onosSetName + ":\n" +
1976 str( getResponses[ i ] ) )
1977 main.log.debug( "Expected: " + str( main.onosSet ) )
1978 main.log.debug( "Actual: " + str( current ) )
1979 getResults = main.FALSE
1980 else:
1981 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001982 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001983 " set " + main.onosSetName + ":\n" +
1984 str( getResponses[ i ] ) )
1985 getResults = main.FALSE
1986 elif getResponses[ i ] == main.ERROR:
1987 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001988 sizeResponses = main.Cluster.command( "setTestSize",
1989 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001990 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001991 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001992 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001993 if size != sizeResponses[ i ]:
1994 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001995 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001996 " for set " + main.onosSetName +
1997 " but got " + str( sizeResponses[ i ] ) )
1998 addAllResults = addAllResults and getResults and sizeResults
1999 utilities.assert_equals( expect=main.TRUE,
2000 actual=addAllResults,
2001 onpass="Set addAll correct",
2002 onfail="Set addAll was incorrect" )
2003
2004 main.step( "Distributed Set clear()" )
2005 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002006 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07002007 args=[ main.onosSetName, " " ], # Values doesn't matter
2008 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002009 # main.TRUE = successfully changed the set
2010 # main.FALSE = action resulted in no change in set
2011 # main.ERROR - Some error in executing the function
2012 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002013 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002014 if clearResponses[ i ] == main.TRUE:
2015 # All is well
2016 pass
2017 elif clearResponses[ i ] == main.FALSE:
2018 # Nothing set, probably fine
2019 pass
2020 elif clearResponses[ i ] == main.ERROR:
2021 # Error in execution
2022 clearResults = main.FALSE
2023 else:
2024 # unexpected result
2025 clearResults = main.FALSE
2026 if clearResults != main.TRUE:
2027 main.log.error( "Error executing set clear" )
2028
2029 # Check if set is still correct
2030 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002031 getResponses = main.Cluster.command( "setTestGet",
2032 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002033 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002034 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002035 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002036 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002037 current = set( getResponses[ i ] )
2038 if len( current ) == len( getResponses[ i ] ):
2039 # no repeats
2040 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002041 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002042 " of set " + main.onosSetName + ":\n" +
2043 str( getResponses[ i ] ) )
2044 main.log.debug( "Expected: " + str( main.onosSet ) )
2045 main.log.debug( "Actual: " + str( current ) )
2046 getResults = main.FALSE
2047 else:
2048 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002049 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002050 " set " + main.onosSetName + ":\n" +
2051 str( getResponses[ i ] ) )
2052 getResults = main.FALSE
2053 elif getResponses[ i ] == main.ERROR:
2054 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002055 sizeResponses = main.Cluster.command( "setTestSize",
2056 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002057 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002058 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002059 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002060 if size != sizeResponses[ i ]:
2061 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002062 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002063 " for set " + main.onosSetName +
2064 " but got " + str( sizeResponses[ i ] ) )
2065 clearResults = clearResults and getResults and sizeResults
2066 utilities.assert_equals( expect=main.TRUE,
2067 actual=clearResults,
2068 onpass="Set clear correct",
2069 onfail="Set clear was incorrect" )
2070
2071 main.step( "Distributed Set addAll()" )
2072 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002073 addResponses = main.Cluster.command( "setTestAdd",
2074 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002075 # main.TRUE = successfully changed the set
2076 # main.FALSE = action resulted in no change in set
2077 # main.ERROR - Some error in executing the function
2078 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002079 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002080 if addResponses[ i ] == main.TRUE:
2081 # All is well
2082 pass
2083 elif addResponses[ i ] == main.FALSE:
2084 # Already in set, probably fine
2085 pass
2086 elif addResponses[ i ] == main.ERROR:
2087 # Error in execution
2088 addAllResults = main.FALSE
2089 else:
2090 # unexpected result
2091 addAllResults = main.FALSE
2092 if addAllResults != main.TRUE:
2093 main.log.error( "Error executing set addAll" )
2094
2095 # Check if set is still correct
2096 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002097 getResponses = main.Cluster.command( "setTestGet",
2098 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002099 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002100 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002101 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002102 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002103 current = set( getResponses[ i ] )
2104 if len( current ) == len( getResponses[ i ] ):
2105 # no repeats
2106 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002107 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002108 " of set " + main.onosSetName + ":\n" +
2109 str( getResponses[ i ] ) )
2110 main.log.debug( "Expected: " + str( main.onosSet ) )
2111 main.log.debug( "Actual: " + str( current ) )
2112 getResults = main.FALSE
2113 else:
2114 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002115 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002116 " set " + main.onosSetName + ":\n" +
2117 str( getResponses[ i ] ) )
2118 getResults = main.FALSE
2119 elif getResponses[ i ] == main.ERROR:
2120 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002121 sizeResponses = main.Cluster.command( "setTestSize",
2122 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002123 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002124 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002125 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002126 if size != sizeResponses[ i ]:
2127 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002128 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002129 " for set " + main.onosSetName +
2130 " but got " + str( sizeResponses[ i ] ) )
2131 addAllResults = addAllResults and getResults and sizeResults
2132 utilities.assert_equals( expect=main.TRUE,
2133 actual=addAllResults,
2134 onpass="Set addAll correct",
2135 onfail="Set addAll was incorrect" )
2136
2137 main.step( "Distributed Set retain()" )
2138 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002139 retainResponses = main.Cluster.command( "setTestRemove",
2140 args=[ main.onosSetName, retainValue ],
2141 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002142 # main.TRUE = successfully changed the set
2143 # main.FALSE = action resulted in no change in set
2144 # main.ERROR - Some error in executing the function
2145 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002146 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002147 if retainResponses[ i ] == main.TRUE:
2148 # All is well
2149 pass
2150 elif retainResponses[ i ] == main.FALSE:
2151 # Already in set, probably fine
2152 pass
2153 elif retainResponses[ i ] == main.ERROR:
2154 # Error in execution
2155 retainResults = main.FALSE
2156 else:
2157 # unexpected result
2158 retainResults = main.FALSE
2159 if retainResults != main.TRUE:
2160 main.log.error( "Error executing set retain" )
2161
2162 # Check if set is still correct
2163 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002164 getResponses = main.Cluster.command( "setTestGet",
2165 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002166 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002167 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002168 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002169 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002170 current = set( getResponses[ i ] )
2171 if len( current ) == len( getResponses[ i ] ):
2172 # no repeats
2173 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002174 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002175 " of set " + main.onosSetName + ":\n" +
2176 str( getResponses[ i ] ) )
2177 main.log.debug( "Expected: " + str( main.onosSet ) )
2178 main.log.debug( "Actual: " + str( current ) )
2179 getResults = main.FALSE
2180 else:
2181 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002182 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002183 " set " + main.onosSetName + ":\n" +
2184 str( getResponses[ i ] ) )
2185 getResults = main.FALSE
2186 elif getResponses[ i ] == main.ERROR:
2187 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002188 sizeResponses = main.Cluster.command( "setTestSize",
2189 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002190 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002191 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002192 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002193 if size != sizeResponses[ i ]:
2194 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002195 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002196 str( size ) + " for set " + main.onosSetName +
2197 " but got " + str( sizeResponses[ i ] ) )
2198 retainResults = retainResults and getResults and sizeResults
2199 utilities.assert_equals( expect=main.TRUE,
2200 actual=retainResults,
2201 onpass="Set retain correct",
2202 onfail="Set retain was incorrect" )
2203
2204 # Transactional maps
2205 main.step( "Partitioned Transactional maps put" )
2206 tMapValue = "Testing"
2207 numKeys = 100
2208 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002209 ctrl = main.Cluster.next()
2210 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002211 if putResponses and len( putResponses ) == 100:
2212 for i in putResponses:
2213 if putResponses[ i ][ 'value' ] != tMapValue:
2214 putResult = False
2215 else:
2216 putResult = False
2217 if not putResult:
2218 main.log.debug( "Put response values: " + str( putResponses ) )
2219 utilities.assert_equals( expect=True,
2220 actual=putResult,
2221 onpass="Partitioned Transactional Map put successful",
2222 onfail="Partitioned Transactional Map put values are incorrect" )
2223
2224 main.step( "Partitioned Transactional maps get" )
2225 # FIXME: is this sleep needed?
2226 time.sleep( 5 )
2227
2228 getCheck = True
2229 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002230 getResponses = main.Cluster.command( "transactionalMapGet",
2231 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002232 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002233 for node in getResponses:
2234 if node != tMapValue:
2235 valueCheck = False
2236 if not valueCheck:
Jon Hall0e240372018-05-02 11:21:57 -07002237 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002238 main.log.warn( getResponses )
2239 getCheck = getCheck and valueCheck
2240 utilities.assert_equals( expect=True,
2241 actual=getCheck,
2242 onpass="Partitioned Transactional Map get values were correct",
2243 onfail="Partitioned Transactional Map values incorrect" )
2244
2245 # DISTRIBUTED ATOMIC VALUE
2246 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002247 getValues = main.Cluster.command( "valueTestGet",
2248 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002249 main.log.debug( getValues )
2250 # Check the results
2251 atomicValueGetResult = True
2252 expected = valueValue if valueValue is not None else "null"
2253 main.log.debug( "Checking for value of " + expected )
2254 for i in getValues:
2255 if i != expected:
2256 atomicValueGetResult = False
2257 utilities.assert_equals( expect=True,
2258 actual=atomicValueGetResult,
2259 onpass="Atomic Value get successful",
2260 onfail="Error getting atomic Value " +
2261 str( valueValue ) + ", found: " +
2262 str( getValues ) )
2263
2264 main.step( "Atomic Value set()" )
2265 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002266 setValues = main.Cluster.command( "valueTestSet",
2267 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002268 main.log.debug( setValues )
2269 # Check the results
2270 atomicValueSetResults = True
2271 for i in setValues:
2272 if i != main.TRUE:
2273 atomicValueSetResults = False
2274 utilities.assert_equals( expect=True,
2275 actual=atomicValueSetResults,
2276 onpass="Atomic Value set successful",
2277 onfail="Error setting atomic Value" +
2278 str( setValues ) )
2279
2280 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002281 getValues = main.Cluster.command( "valueTestGet",
2282 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002283 main.log.debug( getValues )
2284 # Check the results
2285 atomicValueGetResult = True
2286 expected = valueValue if valueValue is not None else "null"
2287 main.log.debug( "Checking for value of " + expected )
2288 for i in getValues:
2289 if i != expected:
2290 atomicValueGetResult = False
2291 utilities.assert_equals( expect=True,
2292 actual=atomicValueGetResult,
2293 onpass="Atomic Value get successful",
2294 onfail="Error getting atomic Value " +
2295 str( valueValue ) + ", found: " +
2296 str( getValues ) )
2297
2298 main.step( "Atomic Value compareAndSet()" )
2299 oldValue = valueValue
2300 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002301 ctrl = main.Cluster.next()
2302 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002303 main.log.debug( CASValue )
2304 utilities.assert_equals( expect=main.TRUE,
2305 actual=CASValue,
2306 onpass="Atomic Value comapreAndSet successful",
2307 onfail="Error setting atomic Value:" +
2308 str( CASValue ) )
2309
2310 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002311 getValues = main.Cluster.command( "valueTestGet",
2312 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002313 main.log.debug( getValues )
2314 # Check the results
2315 atomicValueGetResult = True
2316 expected = valueValue if valueValue is not None else "null"
2317 main.log.debug( "Checking for value of " + expected )
2318 for i in getValues:
2319 if i != expected:
2320 atomicValueGetResult = False
2321 utilities.assert_equals( expect=True,
2322 actual=atomicValueGetResult,
2323 onpass="Atomic Value get successful",
2324 onfail="Error getting atomic Value " +
2325 str( valueValue ) + ", found: " +
2326 str( getValues ) )
2327
2328 main.step( "Atomic Value getAndSet()" )
2329 oldValue = valueValue
2330 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002331 ctrl = main.Cluster.next()
2332 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002333 main.log.debug( GASValue )
2334 expected = oldValue if oldValue is not None else "null"
2335 utilities.assert_equals( expect=expected,
2336 actual=GASValue,
2337 onpass="Atomic Value GAS successful",
2338 onfail="Error with GetAndSet atomic Value: expected " +
2339 str( expected ) + ", found: " +
2340 str( GASValue ) )
2341
2342 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002343 getValues = main.Cluster.command( "valueTestGet",
2344 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002345 main.log.debug( getValues )
2346 # Check the results
2347 atomicValueGetResult = True
2348 expected = valueValue if valueValue is not None else "null"
2349 main.log.debug( "Checking for value of " + expected )
2350 for i in getValues:
2351 if i != expected:
2352 atomicValueGetResult = False
2353 utilities.assert_equals( expect=True,
2354 actual=atomicValueGetResult,
2355 onpass="Atomic Value get successful",
2356 onfail="Error getting atomic Value: expected " +
2357 str( valueValue ) + ", found: " +
2358 str( getValues ) )
2359
2360 main.step( "Atomic Value destory()" )
2361 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002362 ctrl = main.Cluster.next()
2363 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002364 main.log.debug( destroyResult )
2365 # Check the results
2366 utilities.assert_equals( expect=main.TRUE,
2367 actual=destroyResult,
2368 onpass="Atomic Value destroy successful",
2369 onfail="Error destroying atomic Value" )
2370
2371 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002372 getValues = main.Cluster.command( "valueTestGet",
2373 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002374 main.log.debug( getValues )
2375 # Check the results
2376 atomicValueGetResult = True
2377 expected = valueValue if valueValue is not None else "null"
2378 main.log.debug( "Checking for value of " + expected )
2379 for i in getValues:
2380 if i != expected:
2381 atomicValueGetResult = False
2382 utilities.assert_equals( expect=True,
2383 actual=atomicValueGetResult,
2384 onpass="Atomic Value get successful",
2385 onfail="Error getting atomic Value " +
2386 str( valueValue ) + ", found: " +
2387 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002388
2389 # WORK QUEUES
2390 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002391 ctrl = main.Cluster.next()
2392 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002393 workQueuePending += 1
2394 main.log.debug( addResult )
2395 # Check the results
2396 utilities.assert_equals( expect=main.TRUE,
2397 actual=addResult,
2398 onpass="Work Queue add successful",
2399 onfail="Error adding to Work Queue" )
2400
2401 main.step( "Check the work queue stats" )
2402 statsResults = self.workQueueStatsCheck( workQueueName,
2403 workQueueCompleted,
2404 workQueueInProgress,
2405 workQueuePending )
2406 utilities.assert_equals( expect=True,
2407 actual=statsResults,
2408 onpass="Work Queue stats correct",
2409 onfail="Work Queue stats incorrect " )
2410
2411 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002412 ctrl = main.Cluster.next()
2413 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002414 workQueuePending += 2
2415 main.log.debug( addMultipleResult )
2416 # Check the results
2417 utilities.assert_equals( expect=main.TRUE,
2418 actual=addMultipleResult,
2419 onpass="Work Queue add multiple successful",
2420 onfail="Error adding multiple items to Work Queue" )
2421
2422 main.step( "Check the work queue stats" )
2423 statsResults = self.workQueueStatsCheck( workQueueName,
2424 workQueueCompleted,
2425 workQueueInProgress,
2426 workQueuePending )
2427 utilities.assert_equals( expect=True,
2428 actual=statsResults,
2429 onpass="Work Queue stats correct",
2430 onfail="Work Queue stats incorrect " )
2431
2432 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002433 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002434 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002435 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002436 workQueuePending -= number
2437 workQueueCompleted += number
2438 main.log.debug( take1Result )
2439 # Check the results
2440 utilities.assert_equals( expect=main.TRUE,
2441 actual=take1Result,
2442 onpass="Work Queue takeAndComplete 1 successful",
2443 onfail="Error taking 1 from Work Queue" )
2444
2445 main.step( "Check the work queue stats" )
2446 statsResults = self.workQueueStatsCheck( workQueueName,
2447 workQueueCompleted,
2448 workQueueInProgress,
2449 workQueuePending )
2450 utilities.assert_equals( expect=True,
2451 actual=statsResults,
2452 onpass="Work Queue stats correct",
2453 onfail="Work Queue stats incorrect " )
2454
2455 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002456 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002457 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002458 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002459 workQueuePending -= number
2460 workQueueCompleted += number
2461 main.log.debug( take2Result )
2462 # Check the results
2463 utilities.assert_equals( expect=main.TRUE,
2464 actual=take2Result,
2465 onpass="Work Queue takeAndComplete 2 successful",
2466 onfail="Error taking 2 from Work Queue" )
2467
2468 main.step( "Check the work queue stats" )
2469 statsResults = self.workQueueStatsCheck( workQueueName,
2470 workQueueCompleted,
2471 workQueueInProgress,
2472 workQueuePending )
2473 utilities.assert_equals( expect=True,
2474 actual=statsResults,
2475 onpass="Work Queue stats correct",
2476 onfail="Work Queue stats incorrect " )
2477
2478 main.step( "Work Queue destroy()" )
2479 valueValue = None
2480 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002481 ctrl = main.Cluster.next()
2482 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002483 workQueueCompleted = 0
2484 workQueueInProgress = 0
2485 workQueuePending = 0
2486 main.log.debug( destroyResult )
2487 # Check the results
2488 utilities.assert_equals( expect=main.TRUE,
2489 actual=destroyResult,
2490 onpass="Work Queue destroy successful",
2491 onfail="Error destroying Work Queue" )
2492
2493 main.step( "Check the work queue stats" )
2494 statsResults = self.workQueueStatsCheck( workQueueName,
2495 workQueueCompleted,
2496 workQueueInProgress,
2497 workQueuePending )
2498 utilities.assert_equals( expect=True,
2499 actual=statsResults,
2500 onpass="Work Queue stats correct",
2501 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002502 except Exception as e:
2503 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002504
2505 def cleanUp( self, main ):
2506 """
2507 Clean up
2508 """
Devin Lim58046fa2017-07-05 16:55:00 -07002509 assert main, "main not defined"
2510 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002511
2512 # printing colors to terminal
2513 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2514 'blue': '\033[94m', 'green': '\033[92m',
2515 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002516
Devin Lim58046fa2017-07-05 16:55:00 -07002517 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002518
2519 main.step( "Checking raft log size" )
2520 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2521 # get compacted periodically
2522 logCheck = main.Cluster.checkPartitionSize()
2523 utilities.assert_equals( expect=True, actual=logCheck,
2524 onpass="Raft log size is not too big",
2525 onfail="Raft logs grew too big" )
2526
Devin Lim58046fa2017-07-05 16:55:00 -07002527 main.step( "Killing tcpdumps" )
2528 main.Mininet2.stopTcpdump()
2529
2530 testname = main.TEST
2531 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2532 main.step( "Copying MN pcap and ONOS log files to test station" )
2533 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2534 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2535 # NOTE: MN Pcap file is being saved to logdir.
2536 # We scp this file as MN and TestON aren't necessarily the same vm
2537
2538 # FIXME: To be replaced with a Jenkin's post script
2539 # TODO: Load these from params
2540 # NOTE: must end in /
2541 logFolder = "/opt/onos/log/"
2542 logFiles = [ "karaf.log", "karaf.log.1" ]
2543 # NOTE: must end in /
2544 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002545 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002546 dstName = main.logdir + "/" + ctrl.name + "-" + f
2547 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002548 logFolder + f, dstName )
2549 # std*.log's
2550 # NOTE: must end in /
2551 logFolder = "/opt/onos/var/"
2552 logFiles = [ "stderr.log", "stdout.log" ]
2553 # NOTE: must end in /
2554 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002555 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002556 dstName = main.logdir + "/" + ctrl.name + "-" + f
2557 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002558 logFolder + f, dstName )
2559 else:
2560 main.log.debug( "skipping saving log files" )
2561
Jon Hall5d5876e2017-11-30 09:33:16 -08002562 main.step( "Checking ONOS Logs for errors" )
2563 for ctrl in main.Cluster.runningNodes:
2564 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2565 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
2566
Devin Lim58046fa2017-07-05 16:55:00 -07002567 main.step( "Stopping Mininet" )
2568 mnResult = main.Mininet1.stopNet()
2569 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2570 onpass="Mininet stopped",
2571 onfail="MN cleanup NOT successful" )
2572
Devin Lim58046fa2017-07-05 16:55:00 -07002573 try:
2574 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2575 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2576 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2577 timerLog.close()
2578 except NameError as e:
2579 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002580
    def assignMastership( self, main ):
        """
        Manually assign mastership of each Mininet switch to a specific ONOS
        controller using 'device-role', then verify the assignment.

        The switch-to-controller mapping below was laid out for a 7 node
        cluster; the modulo arithmetic on main.Cluster.numCtrls keeps the
        controller indices valid for any cluster size.  Results are reported
        through utilities.assert_equals; nothing is returned.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = []      # controller ip chosen for each switch, in order
        deviceList = []  # ONOS device id of each switch, in order
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c  - index of the controller that should own this switch
                # ip - that controller's IP address
                # deviceId - the ONOS device id looked up from the dpid
                # NOTE(review): main.Cluster.next() appears to select a
                #               (rotating) cluster node to issue the lookup
                #               from — confirm against the Cluster class.
                if i == 1:
                    c = 0
                    ip = main.Cluster.active( c ).ip_address  # ONOS1
                    deviceId = main.Cluster.next().getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS2
                    deviceId = main.Cluster.next().getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS2
                    deviceId = main.Cluster.next().getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS4
                    deviceId = main.Cluster.next().getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS3
                    deviceId = main.Cluster.next().getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS3
                    deviceId = main.Cluster.next().getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS6
                    deviceId = main.Cluster.next().getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS5
                    # dpids for switches 8-17 are 3008..3017
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS7
                    # dpids for switches 18-27 are 6018..6027
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.Cluster.active( c ).ip_address  # ONOS1
                    deviceId = main.Cluster.next().getDevice( "2800" ).get( 'id' )
                else:
                    # Unreachable for range( 1, 29 ); kept as a guard in case
                    # the range is ever widened without a matching branch.
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                # NOTE: once roleCall becomes main.FALSE the `and` short-circuits,
                #       so subsequent deviceRole calls are skipped.
                roleCall = roleCall and main.Cluster.next().deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice returned None / malformed data: dump the device view
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( main.Cluster.next().devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment: the recorded controller ip should appear in the
            # device's current master field
            master = main.Cluster.next().getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002691
Jon Hall5d5876e2017-11-30 09:33:16 -08002692 def bringUpStoppedNodes( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -07002693 """
Jon Hall5d5876e2017-11-30 09:33:16 -08002694 The bring up stopped nodes.
Devin Lim58046fa2017-07-05 16:55:00 -07002695 """
2696 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002697 assert main, "main not defined"
2698 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002699 assert main.kill, "main.kill not defined"
2700 main.case( "Restart minority of ONOS nodes" )
2701
2702 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2703 startResults = main.TRUE
2704 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002705 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002706 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002707 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002708 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2709 onpass="ONOS nodes started successfully",
2710 onfail="ONOS nodes NOT successfully started" )
2711
2712 main.step( "Checking if ONOS is up yet" )
2713 count = 0
2714 onosIsupResult = main.FALSE
2715 while onosIsupResult == main.FALSE and count < 10:
2716 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002717 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002718 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002719 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002720 count = count + 1
2721 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2722 onpass="ONOS restarted successfully",
2723 onfail="ONOS restart NOT successful" )
2724
Jon Hall5d5876e2017-11-30 09:33:16 -08002725 main.step( "Restarting ONOS CLI" )
Devin Lim58046fa2017-07-05 16:55:00 -07002726 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002727 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002728 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002729 ctrl.startOnosCli( ctrl.ipAddress )
2730 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002731 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002732 onpass="ONOS node(s) restarted",
2733 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002734
Jon Hall5d5876e2017-11-30 09:33:16 -08002735 # Grab the time of restart so we can have some idea of average time
Devin Lim58046fa2017-07-05 16:55:00 -07002736 main.restartTime = time.time() - restartTime
2737 main.log.debug( "Restart time: " + str( main.restartTime ) )
2738 # TODO: MAke this configurable. Also, we are breaking the above timer
2739 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08002740 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07002741 False,
Devin Lim58046fa2017-07-05 16:55:00 -07002742 sleep=15,
2743 attempts=5 )
2744
2745 utilities.assert_equals( expect=True, actual=nodeResults,
2746 onpass="Nodes check successful",
2747 onfail="Nodes check NOT successful" )
2748
2749 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002750 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002751 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002752 ctrl.name,
2753 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002754 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002755 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002756
Jon Hallca319892017-06-15 15:25:22 -07002757 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002758
2759 main.step( "Rerun for election on the node(s) that were killed" )
2760 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002761 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002762 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002763 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002764 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2765 onpass="ONOS nodes reran for election topic",
Jon Hall5d5876e2017-11-30 09:33:16 -08002766 onfail="Error rerunning for election" )
2767
2768 def upgradeNodes( self, main ):
2769 """
2770 Reinstall some nodes with an upgraded version.
2771
2772 This will reinstall nodes in main.kill with an upgraded version.
2773 """
2774 import time
2775 assert main, "main not defined"
2776 assert utilities.assert_equals, "utilities.assert_equals not defined"
2777 assert main.kill, "main.kill not defined"
2778 nodeNames = [ node.name for node in main.kill ]
2779 main.step( "Upgrading" + str( nodeNames ) + " ONOS nodes" )
2780
2781 stopResults = main.TRUE
2782 uninstallResults = main.TRUE
2783 startResults = main.TRUE
2784 sshResults = main.TRUE
2785 isup = main.TRUE
2786 restartTime = time.time()
2787 for ctrl in main.kill:
2788 stopResults = stopResults and\
2789 ctrl.onosStop( ctrl.ipAddress )
2790 uninstallResults = uninstallResults and\
2791 ctrl.onosUninstall( ctrl.ipAddress )
2792 # Install the new version of onos
2793 startResults = startResults and\
2794 ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
2795 sshResults = sshResults and\
2796 ctrl.onosSecureSSH( node=ctrl.ipAddress )
2797 isup = isup and ctrl.isup( ctrl.ipAddress )
2798 utilities.assert_equals( expect=main.TRUE, actual=stopResults,
2799 onpass="ONOS nodes stopped successfully",
2800 onfail="ONOS nodes NOT successfully stopped" )
2801 utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
2802 onpass="ONOS nodes uninstalled successfully",
2803 onfail="ONOS nodes NOT successfully uninstalled" )
2804 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2805 onpass="ONOS nodes started successfully",
2806 onfail="ONOS nodes NOT successfully started" )
2807 utilities.assert_equals( expect=main.TRUE, actual=sshResults,
2808 onpass="Successfully secured onos ssh",
2809 onfail="Failed to secure onos ssh" )
2810 utilities.assert_equals( expect=main.TRUE, actual=isup,
2811 onpass="ONOS nodes fully started",
2812 onfail="ONOS nodes NOT fully started" )
2813
2814 main.step( "Restarting ONOS CLI" )
2815 cliResults = main.TRUE
2816 for ctrl in main.kill:
2817 cliResults = cliResults and\
2818 ctrl.startOnosCli( ctrl.ipAddress )
2819 ctrl.active = True
2820 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
2821 onpass="ONOS node(s) restarted",
2822 onfail="ONOS node(s) did not restart" )
2823
2824 # Grab the time of restart so we can have some idea of average time
2825 main.restartTime = time.time() - restartTime
2826 main.log.debug( "Restart time: " + str( main.restartTime ) )
2827 # TODO: Make this configurable.
2828 main.step( "Checking ONOS nodes" )
2829 nodeResults = utilities.retry( main.Cluster.nodesCheck,
2830 False,
2831 sleep=15,
2832 attempts=5 )
2833
2834 utilities.assert_equals( expect=True, actual=nodeResults,
2835 onpass="Nodes check successful",
2836 onfail="Nodes check NOT successful" )
2837
2838 if not nodeResults:
2839 for ctrl in main.Cluster.active():
2840 main.log.debug( "{} components not ACTIVE: \n{}".format(
2841 ctrl.name,
2842 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
2843 main.log.error( "Failed to start ONOS, stopping test" )
2844 main.cleanAndExit()
2845
2846 self.commonChecks()
2847
2848 main.step( "Rerun for election on the node(s) that were killed" )
2849 runResults = main.TRUE
2850 for ctrl in main.kill:
2851 runResults = runResults and\
2852 ctrl.electionTestRun()
2853 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2854 onpass="ONOS nodes reran for election topic",
2855 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002856
Devin Lim142b5342017-07-20 15:22:39 -07002857 def tempCell( self, cellName, ipList ):
2858 main.step( "Create cell file" )
2859 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002860
Devin Lim142b5342017-07-20 15:22:39 -07002861 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2862 main.Mininet1.ip_address,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002863 cellAppString, ipList, main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002864 main.step( "Applying cell variable to environment" )
2865 cellResult = main.ONOSbench.setCell( cellName )
2866 verifyResult = main.ONOSbench.verifyCell()
2867
Devin Lim142b5342017-07-20 15:22:39 -07002868 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002869 """
2870 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002871 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002872 1: scaling
2873 """
2874 """
2875 Check state after ONOS failure/scaling
2876 """
2877 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002878 assert main, "main not defined"
2879 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002880 main.case( "Running ONOS Constant State Tests" )
2881
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002882 OnosAfterWhich = [ "failure", "scaliing" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002883
Devin Lim58046fa2017-07-05 16:55:00 -07002884 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002885 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002886
Devin Lim142b5342017-07-20 15:22:39 -07002887 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002888 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002889
2890 if rolesResults and not consistentMastership:
2891 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002892 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002893 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002894 json.dumps( json.loads( ONOSMastership[ i ] ),
2895 sort_keys=True,
2896 indent=4,
2897 separators=( ',', ': ' ) ) )
2898
2899 if compareSwitch:
2900 description2 = "Compare switch roles from before failure"
2901 main.step( description2 )
2902 try:
2903 currentJson = json.loads( ONOSMastership[ 0 ] )
2904 oldJson = json.loads( mastershipState )
2905 except ( ValueError, TypeError ):
2906 main.log.exception( "Something is wrong with parsing " +
2907 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002908 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2909 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002910 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002911 mastershipCheck = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -08002912 for swName, swDetails in main.Mininet1.getSwitches().items():
2913 switchDPID = swDetails[ 'dpid' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002914 current = [ switch[ 'master' ] for switch in currentJson
2915 if switchDPID in switch[ 'id' ] ]
2916 old = [ switch[ 'master' ] for switch in oldJson
2917 if switchDPID in switch[ 'id' ] ]
2918 if current == old:
2919 mastershipCheck = mastershipCheck and main.TRUE
2920 else:
2921 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2922 mastershipCheck = main.FALSE
2923 utilities.assert_equals(
2924 expect=main.TRUE,
2925 actual=mastershipCheck,
2926 onpass="Mastership of Switches was not changed",
2927 onfail="Mastership of some switches changed" )
2928
2929 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002930 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002931 intentCheck = main.FALSE
2932 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002933
2934 main.step( "Check for consistency in Intents from each controller" )
2935 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2936 main.log.info( "Intents are consistent across all ONOS " +
2937 "nodes" )
2938 else:
2939 consistentIntents = False
2940
2941 # Try to make it easy to figure out what is happening
2942 #
2943 # Intent ONOS1 ONOS2 ...
2944 # 0x01 INSTALLED INSTALLING
2945 # ... ... ...
2946 # ... ... ...
2947 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002948 for ctrl in main.Cluster.active():
2949 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002950 main.log.warn( title )
2951 # get all intent keys in the cluster
2952 keys = []
2953 for nodeStr in ONOSIntents:
2954 node = json.loads( nodeStr )
2955 for intent in node:
2956 keys.append( intent.get( 'id' ) )
2957 keys = set( keys )
2958 for key in keys:
2959 row = "%-13s" % key
2960 for nodeStr in ONOSIntents:
2961 node = json.loads( nodeStr )
2962 for intent in node:
2963 if intent.get( 'id' ) == key:
2964 row += "%-15s" % intent.get( 'state' )
2965 main.log.warn( row )
2966 # End table view
2967
2968 utilities.assert_equals(
2969 expect=True,
2970 actual=consistentIntents,
2971 onpass="Intents are consistent across all ONOS nodes",
2972 onfail="ONOS nodes have different views of intents" )
2973 intentStates = []
2974 for node in ONOSIntents: # Iter through ONOS nodes
2975 nodeStates = []
2976 # Iter through intents of a node
2977 try:
2978 for intent in json.loads( node ):
2979 nodeStates.append( intent[ 'state' ] )
2980 except ( ValueError, TypeError ):
2981 main.log.exception( "Error in parsing intents" )
2982 main.log.error( repr( node ) )
2983 intentStates.append( nodeStates )
2984 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2985 main.log.info( dict( out ) )
2986
2987 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002988 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002989 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002990 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002991 main.log.warn( json.dumps(
2992 json.loads( ONOSIntents[ i ] ),
2993 sort_keys=True,
2994 indent=4,
2995 separators=( ',', ': ' ) ) )
2996 elif intentsResults and consistentIntents:
2997 intentCheck = main.TRUE
2998
2999 # NOTE: Store has no durability, so intents are lost across system
3000 # restarts
3001 if not isRestart:
3002 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
3003 # NOTE: this requires case 5 to pass for intentState to be set.
3004 # maybe we should stop the test if that fails?
3005 sameIntents = main.FALSE
3006 try:
3007 intentState
3008 except NameError:
3009 main.log.warn( "No previous intent state was saved" )
3010 else:
3011 if intentState and intentState == ONOSIntents[ 0 ]:
3012 sameIntents = main.TRUE
3013 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
3014 # TODO: possibly the states have changed? we may need to figure out
3015 # what the acceptable states are
3016 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
3017 sameIntents = main.TRUE
3018 try:
3019 before = json.loads( intentState )
3020 after = json.loads( ONOSIntents[ 0 ] )
3021 for intent in before:
3022 if intent not in after:
3023 sameIntents = main.FALSE
3024 main.log.debug( "Intent is not currently in ONOS " +
3025 "(at least in the same form):" )
3026 main.log.debug( json.dumps( intent ) )
3027 except ( ValueError, TypeError ):
3028 main.log.exception( "Exception printing intents" )
3029 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3030 main.log.debug( repr( intentState ) )
3031 if sameIntents == main.FALSE:
3032 try:
3033 main.log.debug( "ONOS intents before: " )
3034 main.log.debug( json.dumps( json.loads( intentState ),
3035 sort_keys=True, indent=4,
3036 separators=( ',', ': ' ) ) )
3037 main.log.debug( "Current ONOS intents: " )
3038 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
3039 sort_keys=True, indent=4,
3040 separators=( ',', ': ' ) ) )
3041 except ( ValueError, TypeError ):
3042 main.log.exception( "Exception printing intents" )
3043 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3044 main.log.debug( repr( intentState ) )
3045 utilities.assert_equals(
3046 expect=main.TRUE,
3047 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003048 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07003049 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
3050 intentCheck = intentCheck and sameIntents
3051
3052 main.step( "Get the OF Table entries and compare to before " +
3053 "component " + OnosAfterWhich[ afterWhich ] )
3054 FlowTables = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -08003055 for switch in main.Mininet1.getSwitches().keys():
3056 main.log.info( "Checking flow table on " + switch )
3057 tmpFlows = main.Mininet1.getFlowTable( switch, version="1.3", debug=False )
3058 curSwitch = main.Mininet1.flowTableComp( flows[ switch ], tmpFlows )
Devin Lim58046fa2017-07-05 16:55:00 -07003059 FlowTables = FlowTables and curSwitch
3060 if curSwitch == main.FALSE:
Jon Hallab611372018-02-21 15:26:05 -08003061 main.log.warn( "Differences in flow table for switch: {}".format( switch ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003062 utilities.assert_equals(
3063 expect=main.TRUE,
3064 actual=FlowTables,
3065 onpass="No changes were found in the flow tables",
3066 onfail="Changes were found in the flow tables" )
3067
Jon Hallca319892017-06-15 15:25:22 -07003068 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003069 """
3070 main.step( "Check the continuous pings to ensure that no packets " +
3071 "were dropped during component failure" )
3072 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3073 main.params[ 'TESTONIP' ] )
3074 LossInPings = main.FALSE
3075 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3076 for i in range( 8, 18 ):
3077 main.log.info(
3078 "Checking for a loss in pings along flow from s" +
3079 str( i ) )
3080 LossInPings = main.Mininet2.checkForLoss(
3081 "/tmp/ping.h" +
3082 str( i ) ) or LossInPings
3083 if LossInPings == main.TRUE:
3084 main.log.info( "Loss in ping detected" )
3085 elif LossInPings == main.ERROR:
3086 main.log.info( "There are multiple mininet process running" )
3087 elif LossInPings == main.FALSE:
3088 main.log.info( "No Loss in the pings" )
3089 main.log.info( "No loss of dataplane connectivity" )
3090 utilities.assert_equals(
3091 expect=main.FALSE,
3092 actual=LossInPings,
3093 onpass="No Loss of connectivity",
3094 onfail="Loss of dataplane connectivity detected" )
3095 # NOTE: Since intents are not persisted with IntnentStore,
3096 # we expect loss in dataplane connectivity
3097 LossInPings = main.FALSE
3098 """
Devin Lim58046fa2017-07-05 16:55:00 -07003099 def compareTopo( self, main ):
3100 """
3101 Compare topo
3102 """
3103 import json
3104 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003105 assert main, "main not defined"
3106 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003107 try:
3108 from tests.dependencies.topology import Topology
3109 except ImportError:
3110 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003111 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003112 try:
3113 main.topoRelated
3114 except ( NameError, AttributeError ):
3115 main.topoRelated = Topology()
3116 main.case( "Compare ONOS Topology view to Mininet topology" )
3117 main.caseExplanation = "Compare topology objects between Mininet" +\
3118 " and ONOS"
3119 topoResult = main.FALSE
3120 topoFailMsg = "ONOS topology don't match Mininet"
3121 elapsed = 0
3122 count = 0
3123 main.step( "Comparing ONOS topology to MN topology" )
3124 startTime = time.time()
3125 # Give time for Gossip to work
3126 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3127 devicesResults = main.TRUE
3128 linksResults = main.TRUE
3129 hostsResults = main.TRUE
3130 hostAttachmentResults = True
3131 count += 1
3132 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003133 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003134 kwargs={ 'sleep': 5, 'attempts': 5,
3135 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003136 ipResult = main.TRUE
3137
Devin Lim142b5342017-07-20 15:22:39 -07003138 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003139 kwargs={ 'sleep': 5, 'attempts': 5,
3140 'randomTime': True },
3141 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003142
3143 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003144 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003145 if hosts[ controller ]:
3146 for host in hosts[ controller ]:
3147 if host is None or host.get( 'ipAddresses', [] ) == []:
3148 main.log.error(
3149 "Error with host ipAddresses on controller" +
3150 controllerStr + ": " + str( host ) )
3151 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003152 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003153 kwargs={ 'sleep': 5, 'attempts': 5,
3154 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003155 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003156 kwargs={ 'sleep': 5, 'attempts': 5,
3157 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003158 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003159 kwargs={ 'sleep': 5, 'attempts': 5,
3160 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003161
3162 elapsed = time.time() - startTime
3163 cliTime = time.time() - cliStart
Jon Hall5d5876e2017-11-30 09:33:16 -08003164 main.log.debug( "Elapsed time: " + str( elapsed ) )
3165 main.log.debug( "CLI time: " + str( cliTime ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003166
3167 if all( e is None for e in devices ) and\
3168 all( e is None for e in hosts ) and\
3169 all( e is None for e in ports ) and\
3170 all( e is None for e in links ) and\
3171 all( e is None for e in clusters ):
3172 topoFailMsg = "Could not get topology from ONOS"
3173 main.log.error( topoFailMsg )
3174 continue # Try again, No use trying to compare
3175
3176 mnSwitches = main.Mininet1.getSwitches()
3177 mnLinks = main.Mininet1.getLinks()
3178 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003179 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003180 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003181 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3182 controller,
3183 mnSwitches,
3184 devices,
3185 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003186 utilities.assert_equals( expect=main.TRUE,
3187 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003188 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003189 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003190 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003191 " Switches view is incorrect" )
3192
Devin Lim58046fa2017-07-05 16:55:00 -07003193 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003194 main.Mininet1.compareLinks,
3195 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003196 utilities.assert_equals( expect=main.TRUE,
3197 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003198 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003199 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003200 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003201 " links view is incorrect" )
3202 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3203 currentHostsResult = main.Mininet1.compareHosts(
3204 mnHosts,
3205 hosts[ controller ] )
3206 elif hosts[ controller ] == []:
3207 currentHostsResult = main.TRUE
3208 else:
3209 currentHostsResult = main.FALSE
3210 utilities.assert_equals( expect=main.TRUE,
3211 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003212 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003213 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003214 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003215 " hosts don't match Mininet" )
Devin Lim58046fa2017-07-05 16:55:00 -07003216 hostAttachment = True
Jon Hallab611372018-02-21 15:26:05 -08003217 if main.topoMappings:
3218 ctrl = main.Cluster.next()
3219 # CHECKING HOST ATTACHMENT POINTS
3220 zeroHosts = False
3221 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3222 if hosts[ controller ] == []:
3223 main.log.warn( "There are no hosts discovered" )
3224 zeroHosts = True
3225 else:
3226 for host in hosts[ controller ]:
3227 mac = None
3228 locations = []
3229 device = None
3230 port = None
3231 try:
3232 mac = host.get( 'mac' )
3233 assert mac, "mac field could not be found for this host object"
3234 if 'locations' in host:
3235 locations = host.get( 'locations' )
3236 elif 'location' in host:
3237 locations.append( host.get( 'location' ) )
3238 assert locations, "locations field could not be found for this host object"
Devin Lim58046fa2017-07-05 16:55:00 -07003239
Jon Hallab611372018-02-21 15:26:05 -08003240 # Trim the protocol identifier off deviceId
3241 device = str( locations[0].get( 'elementId' ) ).split( ':' )[ 1 ]
3242 assert device, "elementId field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003243
Jon Hallab611372018-02-21 15:26:05 -08003244 port = locations[0].get( 'port' )
3245 assert port, "port field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003246
Jon Hallab611372018-02-21 15:26:05 -08003247 # Now check if this matches where they should be
3248 if mac and device and port:
3249 if str( port ) != "1":
3250 main.log.error( "The attachment port is incorrect for " +
3251 "host " + str( mac ) +
3252 ". Expected: 1 Actual: " + str( port ) )
3253 hostAttachment = False
3254 if device != main.topoMappings[ str( mac ) ]:
3255 main.log.error( "The attachment device is incorrect for " +
3256 "host " + str( mac ) +
3257 ". Expected: " + main.topoMppings[ str( mac ) ] +
3258 " Actual: " + device )
3259 hostAttachment = False
3260 else:
Devin Lim58046fa2017-07-05 16:55:00 -07003261 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003262 except ( AssertionError, TypeError ):
3263 main.log.exception( "Json object not as expected" )
3264 main.log.error( repr( host ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003265 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003266 else:
3267 main.log.error( "No hosts json output or \"Error\"" +
3268 " in output. hosts = " +
3269 repr( hosts[ controller ] ) )
3270 if zeroHosts is False:
3271 # TODO: Find a way to know if there should be hosts in a
3272 # given point of the test
3273 hostAttachment = True
Devin Lim58046fa2017-07-05 16:55:00 -07003274
Jon Hallab611372018-02-21 15:26:05 -08003275 # END CHECKING HOST ATTACHMENT POINTS
Devin Lim58046fa2017-07-05 16:55:00 -07003276 devicesResults = devicesResults and currentDevicesResult
3277 linksResults = linksResults and currentLinksResult
3278 hostsResults = hostsResults and currentHostsResult
3279 hostAttachmentResults = hostAttachmentResults and\
3280 hostAttachment
3281 topoResult = ( devicesResults and linksResults
3282 and hostsResults and ipResult and
3283 hostAttachmentResults )
3284 utilities.assert_equals( expect=True,
3285 actual=topoResult,
3286 onpass="ONOS topology matches Mininet",
3287 onfail=topoFailMsg )
3288 # End of While loop to pull ONOS state
3289
3290 # Compare json objects for hosts and dataplane clusters
3291
3292 # hosts
3293 main.step( "Hosts view is consistent across all ONOS nodes" )
3294 consistentHostsResult = main.TRUE
3295 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003296 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003297 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3298 if hosts[ controller ] == hosts[ 0 ]:
3299 continue
3300 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003301 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003302 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003303 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003304 consistentHostsResult = main.FALSE
3305
3306 else:
Jon Hallca319892017-06-15 15:25:22 -07003307 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003308 controllerStr )
3309 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003310 main.log.debug( controllerStr +
3311 " hosts response: " +
3312 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003313 utilities.assert_equals(
3314 expect=main.TRUE,
3315 actual=consistentHostsResult,
3316 onpass="Hosts view is consistent across all ONOS nodes",
3317 onfail="ONOS nodes have different views of hosts" )
3318
3319 main.step( "Hosts information is correct" )
3320 hostsResults = hostsResults and ipResult
3321 utilities.assert_equals(
3322 expect=main.TRUE,
3323 actual=hostsResults,
3324 onpass="Host information is correct",
3325 onfail="Host information is incorrect" )
3326
3327 main.step( "Host attachment points to the network" )
3328 utilities.assert_equals(
3329 expect=True,
3330 actual=hostAttachmentResults,
3331 onpass="Hosts are correctly attached to the network",
3332 onfail="ONOS did not correctly attach hosts to the network" )
3333
3334 # Strongly connected clusters of devices
3335 main.step( "Clusters view is consistent across all ONOS nodes" )
3336 consistentClustersResult = main.TRUE
3337 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003338 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003339 if "Error" not in clusters[ controller ]:
3340 if clusters[ controller ] == clusters[ 0 ]:
3341 continue
3342 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003343 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003344 controllerStr +
3345 " is inconsistent with ONOS1" )
3346 consistentClustersResult = main.FALSE
3347 else:
3348 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003349 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003350 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003351 main.log.debug( controllerStr +
3352 " clusters response: " +
3353 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003354 utilities.assert_equals(
3355 expect=main.TRUE,
3356 actual=consistentClustersResult,
3357 onpass="Clusters view is consistent across all ONOS nodes",
3358 onfail="ONOS nodes have different views of clusters" )
3359 if not consistentClustersResult:
3360 main.log.debug( clusters )
3361 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003362 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003363
3364 main.step( "There is only one SCC" )
3365 # there should always only be one cluster
3366 try:
3367 numClusters = len( json.loads( clusters[ 0 ] ) )
3368 except ( ValueError, TypeError ):
3369 main.log.exception( "Error parsing clusters[0]: " +
3370 repr( clusters[ 0 ] ) )
3371 numClusters = "ERROR"
3372 clusterResults = main.FALSE
3373 if numClusters == 1:
3374 clusterResults = main.TRUE
3375 utilities.assert_equals(
3376 expect=1,
3377 actual=numClusters,
3378 onpass="ONOS shows 1 SCC",
3379 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3380
3381 topoResult = ( devicesResults and linksResults
3382 and hostsResults and consistentHostsResult
3383 and consistentClustersResult and clusterResults
3384 and ipResult and hostAttachmentResults )
3385
3386 topoResult = topoResult and int( count <= 2 )
3387 note = "note it takes about " + str( int( cliTime ) ) + \
3388 " seconds for the test to make all the cli calls to fetch " +\
3389 "the topology from each ONOS instance"
3390 main.log.info(
3391 "Very crass estimate for topology discovery/convergence( " +
3392 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3393 str( count ) + " tries" )
3394
3395 main.step( "Device information is correct" )
3396 utilities.assert_equals(
3397 expect=main.TRUE,
3398 actual=devicesResults,
3399 onpass="Device information is correct",
3400 onfail="Device information is incorrect" )
3401
3402 main.step( "Links are correct" )
3403 utilities.assert_equals(
3404 expect=main.TRUE,
3405 actual=linksResults,
3406 onpass="Link are correct",
3407 onfail="Links are incorrect" )
3408
3409 main.step( "Hosts are correct" )
3410 utilities.assert_equals(
3411 expect=main.TRUE,
3412 actual=hostsResults,
3413 onpass="Hosts are correct",
3414 onfail="Hosts are incorrect" )
3415
3416 # FIXME: move this to an ONOS state case
3417 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003418 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003419 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003420 attempts=5 )
3421 utilities.assert_equals( expect=True, actual=nodeResults,
3422 onpass="Nodes check successful",
3423 onfail="Nodes check NOT successful" )
3424 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003425 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003426 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003427 ctrl.name,
3428 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003429
3430 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003431 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003432
Jon Hallab611372018-02-21 15:26:05 -08003433 def linkDown( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003434 """
Jon Hallab611372018-02-21 15:26:05 -08003435 Link src-dst down
Devin Lim58046fa2017-07-05 16:55:00 -07003436 """
3437 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003438 assert main, "main not defined"
3439 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003440 # NOTE: You should probably run a topology check after this
3441
3442 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3443
3444 description = "Turn off a link to ensure that Link Discovery " +\
3445 "is working properly"
3446 main.case( description )
3447
Jon Hallab611372018-02-21 15:26:05 -08003448 main.step( "Kill Link between " + src + " and " + dst )
3449 LinkDown = main.Mininet1.link( END1=src, END2=dst, OPTION="down" )
Devin Lim58046fa2017-07-05 16:55:00 -07003450 main.log.info( "Waiting " + str( linkSleep ) +
3451 " seconds for link down to be discovered" )
3452 time.sleep( linkSleep )
3453 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3454 onpass="Link down successful",
3455 onfail="Failed to bring link down" )
3456 # TODO do some sort of check here
3457
Jon Hallab611372018-02-21 15:26:05 -08003458 def linkUp( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003459 """
Jon Hallab611372018-02-21 15:26:05 -08003460 Link src-dst up
Devin Lim58046fa2017-07-05 16:55:00 -07003461 """
3462 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003463 assert main, "main not defined"
3464 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003465 # NOTE: You should probably run a topology check after this
3466
3467 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3468
3469 description = "Restore a link to ensure that Link Discovery is " + \
3470 "working properly"
3471 main.case( description )
3472
Jon Hallab611372018-02-21 15:26:05 -08003473 main.step( "Bring link between " + src + " and " + dst + " back up" )
3474 LinkUp = main.Mininet1.link( END1=src, END2=dst, OPTION="up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003475 main.log.info( "Waiting " + str( linkSleep ) +
3476 " seconds for link up to be discovered" )
3477 time.sleep( linkSleep )
3478 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3479 onpass="Link up successful",
3480 onfail="Failed to bring link up" )
3481
3482 def switchDown( self, main ):
3483 """
3484 Switch Down
3485 """
3486 # NOTE: You should probably run a topology check after this
3487 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003488 assert main, "main not defined"
3489 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003490
3491 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3492
3493 description = "Killing a switch to ensure it is discovered correctly"
Devin Lim58046fa2017-07-05 16:55:00 -07003494 main.case( description )
3495 switch = main.params[ 'kill' ][ 'switch' ]
3496 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3497
3498 # TODO: Make this switch parameterizable
3499 main.step( "Kill " + switch )
3500 main.log.info( "Deleting " + switch )
3501 main.Mininet1.delSwitch( switch )
3502 main.log.info( "Waiting " + str( switchSleep ) +
3503 " seconds for switch down to be discovered" )
3504 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003505 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003506 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003507 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003508 result = main.FALSE
3509 if device and device[ 'available' ] is False:
3510 result = main.TRUE
3511 utilities.assert_equals( expect=main.TRUE, actual=result,
3512 onpass="Kill switch successful",
3513 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003514
Devin Lim58046fa2017-07-05 16:55:00 -07003515 def switchUp( self, main ):
3516 """
3517 Switch Up
3518 """
3519 # NOTE: You should probably run a topology check after this
3520 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003521 assert main, "main not defined"
3522 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003523
3524 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3525 switch = main.params[ 'kill' ][ 'switch' ]
3526 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3527 links = main.params[ 'kill' ][ 'links' ].split()
Devin Lim58046fa2017-07-05 16:55:00 -07003528 description = "Adding a switch to ensure it is discovered correctly"
3529 main.case( description )
3530
3531 main.step( "Add back " + switch )
3532 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3533 for peer in links:
3534 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003535 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003536 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3537 main.log.info( "Waiting " + str( switchSleep ) +
3538 " seconds for switch up to be discovered" )
3539 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003540 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003541 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003542 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003543 result = main.FALSE
3544 if device and device[ 'available' ]:
3545 result = main.TRUE
3546 utilities.assert_equals( expect=main.TRUE, actual=result,
3547 onpass="add switch successful",
3548 onfail="Failed to add switch?" )
3549
3550 def startElectionApp( self, main ):
3551 """
3552 start election app on all onos nodes
3553 """
Devin Lim58046fa2017-07-05 16:55:00 -07003554 assert main, "main not defined"
3555 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003556
3557 main.case( "Start Leadership Election app" )
3558 main.step( "Install leadership election app" )
Jon Hall0e240372018-05-02 11:21:57 -07003559 appResult = main.Cluster.next().CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003560 utilities.assert_equals(
3561 expect=main.TRUE,
3562 actual=appResult,
3563 onpass="Election app installed",
3564 onfail="Something went wrong with installing Leadership election" )
3565
3566 main.step( "Run for election on each node" )
Jon Hall0e240372018-05-02 11:21:57 -07003567 main.Cluster.next().electionTestRun()
Jon Hallca319892017-06-15 15:25:22 -07003568 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003569 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003570 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003571 utilities.assert_equals(
3572 expect=True,
3573 actual=sameResult,
3574 onpass="All nodes see the same leaderboards",
3575 onfail="Inconsistent leaderboards" )
3576
3577 if sameResult:
Jon Hall5d5876e2017-11-30 09:33:16 -08003578 # Check that the leader is one of the active nodes
3579 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003580 leader = leaders[ 0 ][ 0 ]
Jon Hall5d5876e2017-11-30 09:33:16 -08003581 if leader in ips:
3582 legitimate = True
Devin Lim58046fa2017-07-05 16:55:00 -07003583 else:
Jon Hall5d5876e2017-11-30 09:33:16 -08003584 legitimate = False
3585 main.log.debug( leaders )
3586 main.step( "Active node was elected leader?" )
Devin Lim58046fa2017-07-05 16:55:00 -07003587 utilities.assert_equals(
3588 expect=True,
Jon Hall5d5876e2017-11-30 09:33:16 -08003589 actual=legitimate,
Devin Lim58046fa2017-07-05 16:55:00 -07003590 onpass="Correct leader was elected",
3591 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003592 main.Cluster.testLeader = leader
3593
Devin Lim58046fa2017-07-05 16:55:00 -07003594 def isElectionFunctional( self, main ):
3595 """
3596 Check that Leadership Election is still functional
3597 15.1 Run election on each node
3598 15.2 Check that each node has the same leaders and candidates
3599 15.3 Find current leader and withdraw
3600 15.4 Check that a new node was elected leader
3601 15.5 Check that that new leader was the candidate of old leader
3602 15.6 Run for election on old leader
3603 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3604 15.8 Make sure that the old leader was added to the candidate list
3605
3606 old and new variable prefixes refer to data from before vs after
3607 withdrawl and later before withdrawl vs after re-election
3608 """
3609 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003610 assert main, "main not defined"
3611 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003612
3613 description = "Check that Leadership Election is still functional"
3614 main.case( description )
3615 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3616
3617 oldLeaders = [] # list of lists of each nodes' candidates before
3618 newLeaders = [] # list of lists of each nodes' candidates after
3619 oldLeader = '' # the old leader from oldLeaders, None if not same
3620 newLeader = '' # the new leaders fron newLoeaders, None if not same
3621 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3622 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003623 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003624 expectNoLeader = True
3625
3626 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003627 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003628 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003629 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003630 actual=electionResult,
3631 onpass="All nodes successfully ran for leadership",
3632 onfail="At least one node failed to run for leadership" )
3633
3634 if electionResult == main.FALSE:
3635 main.log.error(
3636 "Skipping Test Case because Election Test App isn't loaded" )
3637 main.skipCase()
3638
3639 main.step( "Check that each node shows the same leader and candidates" )
3640 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003641 activeCLIs = main.Cluster.active()
3642 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003643 if sameResult:
3644 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003645 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003646 else:
3647 oldLeader = None
3648 utilities.assert_equals(
3649 expect=True,
3650 actual=sameResult,
3651 onpass="Leaderboards are consistent for the election topic",
3652 onfail=failMessage )
3653
3654 main.step( "Find current leader and withdraw" )
3655 withdrawResult = main.TRUE
3656 # do some sanity checking on leader before using it
3657 if oldLeader is None:
3658 main.log.error( "Leadership isn't consistent." )
3659 withdrawResult = main.FALSE
3660 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003661 for ctrl in main.Cluster.active():
3662 if oldLeader == ctrl.ipAddress:
3663 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003664 break
3665 else: # FOR/ELSE statement
3666 main.log.error( "Leader election, could not find current leader" )
3667 if oldLeader:
3668 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3669 utilities.assert_equals(
3670 expect=main.TRUE,
3671 actual=withdrawResult,
3672 onpass="Node was withdrawn from election",
3673 onfail="Node was not withdrawn from election" )
3674
3675 main.step( "Check that a new node was elected leader" )
3676 failMessage = "Nodes have different leaders"
3677 # Get new leaders and candidates
3678 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3679 newLeader = None
3680 if newLeaderResult:
3681 if newLeaders[ 0 ][ 0 ] == 'none':
3682 main.log.error( "No leader was elected on at least 1 node" )
3683 if not expectNoLeader:
3684 newLeaderResult = False
3685 newLeader = newLeaders[ 0 ][ 0 ]
3686
3687 # Check that the new leader is not the older leader, which was withdrawn
3688 if newLeader == oldLeader:
3689 newLeaderResult = False
3690 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3691 " as the current leader" )
3692 utilities.assert_equals(
3693 expect=True,
3694 actual=newLeaderResult,
3695 onpass="Leadership election passed",
3696 onfail="Something went wrong with Leadership election" )
3697
3698 main.step( "Check that that new leader was the candidate of old leader" )
3699 # candidates[ 2 ] should become the top candidate after withdrawl
3700 correctCandidateResult = main.TRUE
3701 if expectNoLeader:
3702 if newLeader == 'none':
3703 main.log.info( "No leader expected. None found. Pass" )
3704 correctCandidateResult = main.TRUE
3705 else:
3706 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3707 correctCandidateResult = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07003708 utilities.assert_equals(
3709 expect=main.TRUE,
3710 actual=correctCandidateResult,
3711 onpass="Correct Candidate Elected",
3712 onfail="Incorrect Candidate Elected" )
3713
3714 main.step( "Run for election on old leader( just so everyone " +
3715 "is in the hat )" )
3716 if oldLeaderCLI is not None:
3717 runResult = oldLeaderCLI.electionTestRun()
3718 else:
3719 main.log.error( "No old leader to re-elect" )
3720 runResult = main.FALSE
3721 utilities.assert_equals(
3722 expect=main.TRUE,
3723 actual=runResult,
3724 onpass="App re-ran for election",
3725 onfail="App failed to run for election" )
3726
3727 main.step(
3728 "Check that oldLeader is a candidate, and leader if only 1 node" )
3729 # verify leader didn't just change
3730 # Get new leaders and candidates
3731 reRunLeaders = []
3732 time.sleep( 5 ) # Paremterize
3733 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3734
Devin Lim58046fa2017-07-05 16:55:00 -07003735 def installDistributedPrimitiveApp( self, main ):
Jon Hall5d5876e2017-11-30 09:33:16 -08003736 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003737 Install Distributed Primitives app
Jon Hall5d5876e2017-11-30 09:33:16 -08003738 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003739 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003740 assert main, "main not defined"
3741 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003742
3743 # Variables for the distributed primitives tests
3744 main.pCounterName = "TestON-Partitions"
3745 main.pCounterValue = 0
3746 main.onosSet = set( [] )
3747 main.onosSetName = "TestON-set"
3748
3749 description = "Install Primitives app"
3750 main.case( description )
3751 main.step( "Install Primitives app" )
3752 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003753 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003754 utilities.assert_equals( expect=main.TRUE,
3755 actual=appResults,
3756 onpass="Primitives app activated",
3757 onfail="Primitives app not activated" )
3758 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003759 time.sleep( 5 ) # To allow all nodes to activate
Jon Halla478b852017-12-04 15:00:15 -08003760
3761 def upgradeInit( self, main ):
3762 '''
3763 Initiates an update
3764 '''
3765 main.step( "Send the command to initialize the upgrade" )
3766 ctrl = main.Cluster.next().CLI
3767 initialized = ctrl.issuInit()
3768 utilities.assert_equals( expect=main.TRUE, actual=initialized,
3769 onpass="ISSU initialized",
3770 onfail="Error initializing the upgrade" )
3771
3772 main.step( "Check the status of the upgrade" )
3773 ctrl = main.Cluster.next().CLI
3774 status = ctrl.issu()
3775 main.log.debug( status )
3776 # TODO: check things here?
3777
3778 main.step( "Checking ONOS nodes" )
3779 nodeResults = utilities.retry( main.Cluster.nodesCheck,
3780 False,
3781 sleep=15,
3782 attempts=5 )
3783 utilities.assert_equals( expect=True, actual=nodeResults,
3784 onpass="Nodes check successful",
3785 onfail="Nodes check NOT successful" )
Jon Hall7ce46ea2018-02-05 12:20:59 -08003786
3787 def backupData( self, main, location ):
3788 """
3789 Backs up ONOS data and logs to a given location on each active node in a cluster
3790 """
3791 result = True
3792 for ctrl in main.Cluster.active():
3793 try:
3794 ctrl.server.handle.sendline( "rm " + location )
3795 ctrl.server.handle.expect( ctrl.server.prompt )
3796 main.log.debug( ctrl.server.handle.before + ctrl.server.handle.after )
3797 except pexpect.ExceptionPexpect as e:
3798 main.log.error( e )
3799 main.cleanAndExit()
3800 ctrl.CLI.log( "'Starting backup of onos data'", level="INFO" )
3801 result = result and ( ctrl.server.backupData( location ) is main.TRUE )
3802 ctrl.CLI.log( "'End of backup of onos data'", level="INFO" )
3803 return result
3804
3805 def restoreData( self, main, location ):
3806 """
3807 Restores ONOS data and logs from a given location on each node in a cluster
3808 """
3809 result = True
3810 for ctrl in main.Cluster.controllers:
3811 result = result and ( ctrl.server.restoreData( location ) is main.TRUE )
3812 return result
Jon Hallab611372018-02-21 15:26:05 -08003813
    def startTopology( self, main ):
        """
        Starts Mininet using a topology file after pushing a network config file to ONOS.

        Steps:
            1. Push the network configuration ( netcfg ) JSON file to ONOS
            2. Read ONOS's netcfg back via REST and compare it to what was pushed
            3. Copy the topology files to the Mininet machine and start Mininet
        Exits the test ( main.cleanAndExit ) if the local netcfg file cannot be read.
        """
        import json
        import time
        main.case( "Starting Mininet Topology" )

        main.step( "Pushing Network config" )
        ctrl = main.Cluster.next()
        cfgPath = main.testsRoot + main.params[ 'topology' ][ 'configPath' ]
        cfgResult = ctrl.onosNetCfg( ctrl.ipAddress,
                                     path=cfgPath,
                                     fileName=main.params[ 'topology' ][ 'configName' ] )
        utilities.assert_equals( expect=main.TRUE, actual=cfgResult,
                                 onpass="Pushed Network Configuration to ONOS",
                                 onfail="Failed to push Network Configuration to ONOS" )

        main.step( "Check Network config" )
        try:
            cfgFile = cfgPath + main.params[ 'topology' ][ 'configName' ]
            with open( cfgFile, 'r' ) as contents:
                pushedNetCfg = json.load( contents )
                # Lower-case the whole document so the comparisons below are
                # case-insensitive; ONOS prints some fields in lower case
                pushedNetCfg = json.loads( json.dumps( pushedNetCfg ).lower() )
        except IOError:
            main.log.exception( "Net Cfg file not found." )
            main.cleanAndExit()
        netCfgSleep = int( main.params[ 'timers' ][ 'NetCfg' ] )
        time.sleep( netCfgSleep )
        rawONOSNetCfg = utilities.retry( f=main.Cluster.next().REST.getNetCfg,
                                         retValue=False,
                                         attempts=5,
                                         sleep=netCfgSleep )
        # Fix differences between ONOS printing and Pushed Cfg
        onosNetCfg = json.loads( rawONOSNetCfg.lower() )

        # Compare pushed device config
        cfgResult = True
        for did, pushedDevice in pushedNetCfg[ 'devices' ].items():
            onosDevice = onosNetCfg[ 'devices' ].get( did )
            if pushedDevice != onosDevice:
                cfgResult = False
                main.log.error( "Pushed Network configuration does not match what is in " +
                                "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedDevice ),
                                                                      ctrl.pprint( onosDevice ) ) )

        # Compare pushed port config
        for portURI, pushedInterface in pushedNetCfg[ 'ports' ].items():
            onosInterface = onosNetCfg[ 'ports' ].get( portURI )
            # NOTE: pushed Cfg doesn't have macs
            # Compare each interface field-by-field so fields missing from the
            # pushed config ( e.g. macs ) do not cause a false mismatch
            for i in xrange( 0, len( pushedInterface[ 'interfaces' ] ) ):
                keys = pushedInterface[ 'interfaces' ][ i ].keys()
                portCompare = True
                for key in keys:
                    if pushedInterface[ 'interfaces' ][ i ].get( key ) != onosInterface[ 'interfaces' ][ i ].get( key ) :
                        main.log.debug( "{} mismatch for port {}".format( key, portURI ) )
                        portCompare = False
                if not portCompare:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedInterface ),
                                                                          ctrl.pprint( onosInterface ) ) )

        if pushedNetCfg.get( 'hosts' ) is not None:
            # Compare pushed host config
            # NOTE: host ids are looked up lower-cased on the ONOS side
            for hid, pushedHost in pushedNetCfg[ 'hosts' ].items():
                onosHost = onosNetCfg[ 'hosts' ].get( hid.lower() )
                if pushedHost != onosHost:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost ),
                                                                          ctrl.pprint( onosHost ) ) )
        utilities.assert_equals( expect=True,
                                 actual=cfgResult,
                                 onpass="Net Cfg set",
                                 onfail="Net Cfg not correctly set" )
        if not cfgResult:
            # Dump both documents to help debug any mismatch reported above
            main.log.debug( "Pushed Network Config:" + ctrl.pprint( pushedNetCfg ) )
            main.log.debug( "ONOS Network Config:" + ctrl.pprint( onosNetCfg ) )

        main.step( "Start Mininet topology" )
        # Copy the topology files to the Mininet machine before starting
        for f in main.params[ 'topology' ][ 'files' ].values():
            main.ONOSbench.scp( main.Mininet1,
                                f,
                                main.Mininet1.home,
                                direction="to" )
        topoName = main.params[ 'topology' ][ 'topoFile' ]
        topo = main.Mininet1.home + topoName
        ctrlList = ''
        for ctrl in main.Cluster.controllers:
            ctrlList += str( ctrl.ipAddress ) + ","
        args = main.params[ 'topology' ][ 'args' ]
        startResult = main.Mininet1.startNet( topoFile=topo,
                                              args=" --onos-ip=" + ctrlList + " " + args )
        utilities.assert_equals( expect=main.TRUE, actual=startResult,
                                 onpass="Mininet Started",
                                 onfail="Failed to start Mininet" )
        # Give SR app time to configure the network
        time.sleep( int( main.params[ 'timers' ][ 'SRSetup' ] ) )