blob: 0e5da1d79a5a29f4fa39b05e3ad18035a976009a [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halla478b852017-12-04 15:00:15 -080023import pexpect
24import re
Jon Halle1a3b752015-07-22 13:02:46 -070025
Jon Hallf37d44d2017-05-24 10:37:30 -070026
Jon Hall41d39f12016-04-11 22:54:35 -070027class HA():
Jon Hall57b50432015-10-22 10:20:10 -070028
Jon Halla440e872016-03-31 15:15:50 -070029 def __init__( self ):
30 self.default = ''
Jon Hallab611372018-02-21 15:26:05 -080031 main.topoMappings = {}
Jon Hall57b50432015-10-22 10:20:10 -070032
Devin Lim58046fa2017-07-05 16:55:00 -070033 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070034 # copy gen-partions file to ONOS
35 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hallab611372018-02-21 15:26:05 -080036 srcFile = main.testsRoot + "/HA/dependencies/onos-gen-partitions"
Devin Lim58046fa2017-07-05 16:55:00 -070037 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
38 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
39 main.ONOSbench.ip_address,
40 srcFile,
41 dstDir,
42 pwd=main.ONOSbench.pwd,
43 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070044
    def cleanUpGenPartition( self ):
        """
        Revert the customized onos-gen-partitions script by checking out the
        pristine copy from git on the ONOS bench machine.

        Exits the test via main.cleanAndExit() on pexpect TIMEOUT/EOF, since
        the shell session is then in an unknown state.
        """
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070058
Devin Lim58046fa2017-07-05 16:55:00 -070059 def startingMininet( self ):
60 main.step( "Starting Mininet" )
61 # scp topo file to mininet
62 # TODO: move to params?
63 topoName = "obelisk.py"
64 filePath = main.ONOSbench.home + "/tools/test/topos/"
65 main.ONOSbench.scp( main.Mininet1,
66 filePath + topoName,
67 main.Mininet1.home,
68 direction="to" )
69 mnResult = main.Mininet1.startNet()
70 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
71 onpass="Mininet Started",
72 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070073
    def scalingMetadata( self ):
        """
        Pop the next cluster size from main.params[ 'scaling' ] and generate
        a cluster metadata file for that many nodes via main.Server.

        A size token containing "b" passes equal=True to generateFile()
        ( presumably "balanced" partitions — TODO confirm against Server ).
        """
        main.step( "Generate initial metadata file" )
        main.scaling = main.params[ 'scaling' ].split( "," )
        main.log.debug( main.scaling )
        scale = main.scaling.pop( 0 )
        main.log.debug( scale )
        if "b" in scale:
            equal = True
        else:
            equal = False
        main.log.debug( equal )
        # the numeric part of the token is the number of nodes to run
        main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
        genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070090
    def swapNodeMetadata( self ):
        """
        Generate a cluster metadata file using two fewer nodes than are
        available, reserving the extras for a later node swap.

        Requires 5 or more ONOS nodes; with fewer, an error is logged but a
        metadata file is still generated for the current node count.
        """
        main.step( "Generate initial metadata file" )
        if main.Cluster.numCtrls >= 5:
            main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
        else:
            main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
        genResult = main.Server.generateFile( main.Cluster.numCtrls )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700101
    def setServerForCluster( self ):
        """
        Start an HTTP server on the ONOS bench machine serving this test's
        dependencies directory, which holds the cluster metadata file.

        Reads the port from main.params[ 'server' ][ 'port' ] and saves it
        in main.serverPort ( later used by setMetadataUrl() ).
        """
        import os
        main.step( "Setup server for cluster metadata file" )
        main.serverPort = main.params[ 'server' ][ 'port' ]
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=main.serverPort,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failled to start SimpleHTTPServer" )
115
Jon Hall4f360bc2017-09-07 10:19:52 -0700116 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700117 main.step( "Copying backup config files" )
118 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
119 cp = main.ONOSbench.scp( main.ONOSbench,
120 main.onosServicepath,
121 main.onosServicepath + ".backup",
122 direction="to" )
123
124 utilities.assert_equals( expect=main.TRUE,
125 actual=cp,
126 onpass="Copy backup config file succeeded",
127 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700128
    def setMetadataUrl( self ):
        """
        Modify the onos-service script so ONOS loads its cluster metadata
        from the HTTP server started by setServerForCluster().

        Uses sed over the ONOS bench pexpect handle to insert a JAVA_OPTS
        export setting onos.cluster.metadata.uri into the script.
        """
        # NOTE: You should probably backup the config before and reset the config after the test
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # forward slashes are pre-escaped because this is substituted into sed
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # double braces {{ }} keep ${JAVA_OPTS:-...} literal through .format()
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
                                                                                         main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # the command echo contains the metadata file name; wait for it, then
        # for the prompt, collecting all output for debugging
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
150
151 def cleanUpOnosService( self ):
152 # Cleanup custom onos-service file
153 main.ONOSbench.scp( main.ONOSbench,
154 main.onosServicepath + ".backup",
155 main.onosServicepath,
156 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700157
Jon Halla440e872016-03-31 15:15:50 -0700158 def consistentCheck( self ):
159 """
160 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700161
Jon Hallf37d44d2017-05-24 10:37:30 -0700162 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700163 - onosCounters is the parsed json output of the counters command on
164 all nodes
165 - consistent is main.TRUE if all "TestON" counters are consitent across
166 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700167 """
Jon Halle1a3b752015-07-22 13:02:46 -0700168 try:
Jon Halla440e872016-03-31 15:15:50 -0700169 # Get onos counters results
170 onosCountersRaw = []
171 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700172 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700173 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700174 name="counters-" + str( ctrl ),
175 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700176 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700177 'randomTime': True } )
178 threads.append( t )
179 t.start()
180 for t in threads:
181 t.join()
182 onosCountersRaw.append( t.result )
183 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700184 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700185 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700186 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700187 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700188 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700189 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700190 main.log.warn( repr( onosCountersRaw[ i ] ) )
191 onosCounters.append( [] )
192
193 testCounters = {}
194 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700195 # lookes like a dict whose keys are the name of the ONOS node and
196 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700197 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700198 # }
199 # NOTE: There is an assumtion that all nodes are active
200 # based on the above for loops
201 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700202 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700203 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700204 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700205 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700206 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700207 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700208 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700209 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700210 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700211 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
212 if all( tmp ):
213 consistent = main.TRUE
214 else:
215 consistent = main.FALSE
216 main.log.error( "ONOS nodes have different values for counters:\n" +
217 testCounters )
218 return ( onosCounters, consistent )
219 except Exception:
220 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700221 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700222
223 def counterCheck( self, counterName, counterValue ):
224 """
225 Checks that TestON counters are consistent across all nodes and that
226 specified counter is in ONOS with the given value
227 """
228 try:
229 correctResults = main.TRUE
230 # Get onos counters results and consistentCheck
231 onosCounters, consistent = self.consistentCheck()
232 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700233 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700234 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700235 onosValue = None
236 try:
237 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700238 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700239 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700240 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700241 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700242 correctResults = main.FALSE
243 if onosValue == counterValue:
244 main.log.info( counterName + " counter value is correct" )
245 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700246 main.log.error( counterName +
247 " counter value is incorrect," +
248 " expected value: " + str( counterValue ) +
249 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700250 correctResults = main.FALSE
251 return consistent and correctResults
252 except Exception:
253 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700254 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700255
256 def consistentLeaderboards( self, nodes ):
257 TOPIC = 'org.onosproject.election'
258 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700259 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700260 for n in range( 5 ): # Retry in case election is still happening
261 leaderList = []
262 # Get all leaderboards
263 for cli in nodes:
264 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
265 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700266 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700267 leaderList is not None
268 main.log.debug( leaderList )
269 main.log.warn( result )
270 if result:
271 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700272 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700273 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
274 return ( result, leaderList )
275
Devin Lim58046fa2017-07-05 16:55:00 -0700276 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
Jeremy Ronquillo7f8fb572017-11-14 08:28:41 -0800277 # DEPRECATED: ONOSSetup.py now creates these graphs.
278
279 main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
Jon Hallca319892017-06-15 15:25:22 -0700280
    def initialSetUp( self, serviceClean=False ):
        """
        rest of initialSetup

        Optionally starts a tcpdump capture on Mininet, optionally reverts
        ONOS service init-file changes, verifies all ONOS nodes are up,
        activates the apps listed in the params file, applies any ONOS
        configuration settings from the params file, and checks app ids.

        serviceClean - when True, git-checkout tools/package/init files to
                       undo earlier modifications
        """
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       attempts=9 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # dump each node's inactive components before aborting the test
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            # verify every requested app reached the ACTIVE state
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This shoudl be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700366
    def commonChecks( self ):
        """
        Run the standard suite of cluster sanity checks: leader topics,
        partitions, the pending map, and app ids, in that order.
        """
        # TODO: make this assertable or assert in here?
        self.topicsCheck()
        self.partitionsCheck()
        self.pendingMapCheck()
        self.appCheck()
373
    def topicsCheck( self, extraTopics=[] ):
        """
        Check for work partition topics in leaders output

        Verifies that topics work-partition-0 .. work-partition-13, plus any
        topics given in extraTopics, all appear in the cluster's leaders
        output.

        NOTE: returns True when a topic is MISSING, so a truthy return
        indicates a problem.
        """
        # NOTE(review): mutable default arg is harmless here because
        # extraTopics is only read, never mutated
        leaders = main.Cluster.next().leaders()
        missing = False
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                output = json.dumps( parsedLeaders,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Leaders: " + output )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                topics += extraTopics
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        if missing:
            # NOTE Can we refactor this into the Cluster class?
            # Maybe an option to print the output of a command from each node?
            for ctrl in main.Cluster.active():
                response = ctrl.CLI.leaders( jsonFormat=False )
                main.log.debug( str( ctrl.name ) + " leaders output: \n" +
                                str( response ) )
        return missing
413
    def partitionsCheck( self ):
        """
        Fetch the cluster's partitions output from one node and log it as
        pretty-printed JSON.

        Currently only logs; does not return a pass/fail result.
        """
        # TODO: return something assertable
        partitions = main.Cluster.next().partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                output = json.dumps( parsedPartitions,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Partitions: " + output )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
432
    def pendingMapCheck( self ):
        """
        Fetch the pending map output from one node and log it as
        pretty-printed JSON.

        Currently only logs; does not return a pass/fail result.
        """
        pendingMap = main.Cluster.next().pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                output = json.dumps( parsedPending,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Pending map: " + output )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )
449
450 def appCheck( self ):
451 """
452 Check App IDs on all nodes
453 """
454 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
Jon Hallb9d381e2018-02-05 12:02:10 -0800455 for i in range( 15 ):
456 # TODO modify retry or add a new version that accepts looking for
457 # a value in a return list instead of needing to match the entire
458 # return value to retry
459 appResults = main.Cluster.command( "appToIDCheck" )
460 appCheck = all( i == main.TRUE for i in appResults )
461 if appCheck:
462 break
463 else:
464 time.sleep( 5 )
465
Jon Hallca319892017-06-15 15:25:22 -0700466 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700467 ctrl = main.Cluster.active( 0 )
Jon Hallb9d381e2018-02-05 12:02:10 -0800468 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.pprint( ctrl.apps() ) ) )
469 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.pprint( ctrl.appIDs() ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700470 return appCheck
471
Jon Halle0f0b342017-04-18 11:43:47 -0700472 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
473 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700474 completedValues = main.Cluster.command( "workQueueTotalCompleted",
475 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700476 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700477 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700478 completedResult = all( completedResults )
479 if not completedResult:
480 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
481 workQueueName, completed, completedValues ) )
482
483 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700484 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
485 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700486 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700487 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700488 inProgressResult = all( inProgressResults )
489 if not inProgressResult:
490 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
491 workQueueName, inProgress, inProgressValues ) )
492
493 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700494 pendingValues = main.Cluster.command( "workQueueTotalPending",
495 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700496 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700497 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700498 pendingResult = all( pendingResults )
499 if not pendingResult:
500 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
501 workQueueName, pending, pendingValues ) )
502 return completedResult and inProgressResult and pendingResult
503
    def assignDevices( self, main ):
        """
        Assign devices to controllers

        Points every Mininet switch at the running ONOS controllers and
        verifies each switch lists them all, then builds the obelisk-topology
        host-mac -> device-id table in main.topoMappings.
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = main.Mininet1.getSwitches().keys()
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        # confirm each switch's controller list contains every running node
        for switch in swList:
            response = main.Mininet1.getSwController( switch )
            try:
                main.log.info( str( response ) )
                for ctrl in main.Cluster.runningNodes:
                    if re.search( "tcp:" + ctrl.ipAddress, response ):
                        mastershipCheck = mastershipCheck and main.TRUE
                    else:
                        main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                        "not in the list of controllers " +
                                        switch + " is connecting to." )
                        mastershipCheck = main.FALSE
            except Exception:
                main.log.warn( "Error parsing get-controller response" )
                mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

        # Mappings for attachmentPoints from host mac to deviceID
        # TODO: make the key a dict with deviceIds and port #'s
        # FIXME: topo-HA/obelisk specific mappings:
        # key is mac and value is dpid
        main.topoMappings = {}
        for i in range( 1, 29 ):  # hosts 1 through 28
            # set up correct variables:
            # host i's mac is 00:00:00:00:00:<i in hex, zero-padded>
            macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
            if i == 1:
                deviceId = "1000".zfill( 16 )
            elif i == 2:
                deviceId = "2000".zfill( 16 )
            elif i == 3:
                deviceId = "3000".zfill( 16 )
            elif i == 4:
                deviceId = "3004".zfill( 16 )
            elif i == 5:
                deviceId = "5000".zfill( 16 )
            elif i == 6:
                deviceId = "6000".zfill( 16 )
            elif i == 7:
                deviceId = "6007".zfill( 16 )
            elif i >= 8 and i <= 17:
                dpid = '3' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i >= 18 and i <= 27:
                dpid = '6' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i == 28:
                deviceId = "2800".zfill( 16 )
            main.topoMappings[ macId ] = deviceId
575
Devin Lim58046fa2017-07-05 16:55:00 -0700576 def assignIntents( self, main ):
577 """
578 Assign intents
579 """
580 import time
581 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700582 assert main, "main not defined"
583 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700584 try:
585 main.HAlabels
586 except ( NameError, AttributeError ):
587 main.log.error( "main.HAlabels not defined, setting to []" )
588 main.HAlabels = []
589 try:
590 main.HAdata
591 except ( NameError, AttributeError ):
592 main.log.error( "data not defined, setting to []" )
593 main.HAdata = []
594 main.case( "Adding host Intents" )
595 main.caseExplanation = "Discover hosts by using pingall then " +\
596 "assign predetermined host-to-host intents." +\
597 " After installation, check that the intent" +\
598 " is distributed to all nodes and the state" +\
599 " is INSTALLED"
600
601 # install onos-app-fwd
602 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700603 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700604 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700605 utilities.assert_equals( expect=main.TRUE, actual=installResults,
606 onpass="Install fwd successful",
607 onfail="Install fwd failed" )
608
609 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700610 appCheck = self.appCheck()
611 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700612 onpass="App Ids seem to be correct",
613 onfail="Something is wrong with app Ids" )
614
615 main.step( "Discovering Hosts( Via pingall for now )" )
616 # FIXME: Once we have a host discovery mechanism, use that instead
617 # REACTIVE FWD test
618 pingResult = main.FALSE
619 passMsg = "Reactive Pingall test passed"
620 time1 = time.time()
621 pingResult = main.Mininet1.pingall()
622 time2 = time.time()
623 if not pingResult:
624 main.log.warn( "First pingall failed. Trying again..." )
625 pingResult = main.Mininet1.pingall()
626 passMsg += " on the second try"
627 utilities.assert_equals(
628 expect=main.TRUE,
629 actual=pingResult,
630 onpass=passMsg,
631 onfail="Reactive Pingall failed, " +
632 "one or more ping pairs failed" )
633 main.log.info( "Time for pingall: %2f seconds" %
634 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700635 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700636 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700637 # timeout for fwd flows
638 time.sleep( 11 )
639 # uninstall onos-app-fwd
640 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700641 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700642 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
643 onpass="Uninstall fwd successful",
644 onfail="Uninstall fwd failed" )
645
646 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700647 appCheck2 = self.appCheck()
648 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700649 onpass="App Ids seem to be correct",
650 onfail="Something is wrong with app Ids" )
651
652 main.step( "Add host intents via cli" )
653 intentIds = []
654 # TODO: move the host numbers to params
655 # Maybe look at all the paths we ping?
656 intentAddResult = True
657 hostResult = main.TRUE
658 for i in range( 8, 18 ):
659 main.log.info( "Adding host intent between h" + str( i ) +
660 " and h" + str( i + 10 ) )
661 host1 = "00:00:00:00:00:" + \
662 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
663 host2 = "00:00:00:00:00:" + \
664 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
665 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700666 host1Dict = onosCli.CLI.getHost( host1 )
667 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700668 host1Id = None
669 host2Id = None
670 if host1Dict and host2Dict:
671 host1Id = host1Dict.get( 'id', None )
672 host2Id = host2Dict.get( 'id', None )
673 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700674 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700675 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700676 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700677 if tmpId:
678 main.log.info( "Added intent with id: " + tmpId )
679 intentIds.append( tmpId )
680 else:
681 main.log.error( "addHostIntent returned: " +
682 repr( tmpId ) )
683 else:
684 main.log.error( "Error, getHost() failed for h" + str( i ) +
685 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700686 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700687 try:
Jon Hallca319892017-06-15 15:25:22 -0700688 output = json.dumps( json.loads( hosts ),
689 sort_keys=True,
690 indent=4,
691 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700692 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700693 output = repr( hosts )
694 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700695 hostResult = main.FALSE
696 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
697 onpass="Found a host id for each host",
698 onfail="Error looking up host ids" )
699
700 intentStart = time.time()
701 onosIds = onosCli.getAllIntentsId()
702 main.log.info( "Submitted intents: " + str( intentIds ) )
703 main.log.info( "Intents in ONOS: " + str( onosIds ) )
704 for intent in intentIds:
705 if intent in onosIds:
706 pass # intent submitted is in onos
707 else:
708 intentAddResult = False
709 if intentAddResult:
710 intentStop = time.time()
711 else:
712 intentStop = None
713 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700714 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700715 intentStates = []
716 installedCheck = True
717 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
718 count = 0
719 try:
720 for intent in json.loads( intents ):
721 state = intent.get( 'state', None )
722 if "INSTALLED" not in state:
723 installedCheck = False
724 intentId = intent.get( 'id', None )
725 intentStates.append( ( intentId, state ) )
726 except ( ValueError, TypeError ):
727 main.log.exception( "Error parsing intents" )
728 # add submitted intents not in the store
729 tmplist = [ i for i, s in intentStates ]
730 missingIntents = False
731 for i in intentIds:
732 if i not in tmplist:
733 intentStates.append( ( i, " - " ) )
734 missingIntents = True
735 intentStates.sort()
736 for i, s in intentStates:
737 count += 1
738 main.log.info( "%-6s%-15s%-15s" %
739 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700740 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700741
742 intentAddResult = bool( intentAddResult and not missingIntents and
743 installedCheck )
744 if not intentAddResult:
745 main.log.error( "Error in pushing host intents to ONOS" )
746
747 main.step( "Intent Anti-Entropy dispersion" )
748 for j in range( 100 ):
749 correct = True
750 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700751 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700752 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700753 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700754 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700755 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700756 str( sorted( onosIds ) ) )
757 if sorted( ids ) != sorted( intentIds ):
758 main.log.warn( "Set of intent IDs doesn't match" )
759 correct = False
760 break
761 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700762 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700763 for intent in intents:
764 if intent[ 'state' ] != "INSTALLED":
765 main.log.warn( "Intent " + intent[ 'id' ] +
766 " is " + intent[ 'state' ] )
767 correct = False
768 break
769 if correct:
770 break
771 else:
772 time.sleep( 1 )
773 if not intentStop:
774 intentStop = time.time()
775 global gossipTime
776 gossipTime = intentStop - intentStart
777 main.log.info( "It took about " + str( gossipTime ) +
778 " seconds for all intents to appear in each node" )
779 append = False
780 title = "Gossip Intents"
781 count = 1
782 while append is False:
783 curTitle = title + str( count )
784 if curTitle not in main.HAlabels:
785 main.HAlabels.append( curTitle )
786 main.HAdata.append( str( gossipTime ) )
787 append = True
788 else:
789 count += 1
790 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700791 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700792 utilities.assert_greater_equals(
793 expect=maxGossipTime, actual=gossipTime,
794 onpass="ECM anti-entropy for intents worked within " +
795 "expected time",
796 onfail="Intent ECM anti-entropy took too long. " +
797 "Expected time:{}, Actual time:{}".format( maxGossipTime,
798 gossipTime ) )
799 if gossipTime <= maxGossipTime:
800 intentAddResult = True
801
Jon Hallca319892017-06-15 15:25:22 -0700802 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700803 if not intentAddResult or "key" in pendingMap:
Devin Lim58046fa2017-07-05 16:55:00 -0700804 installedCheck = True
805 main.log.info( "Sleeping 60 seconds to see if intents are found" )
806 time.sleep( 60 )
807 onosIds = onosCli.getAllIntentsId()
808 main.log.info( "Submitted intents: " + str( intentIds ) )
809 main.log.info( "Intents in ONOS: " + str( onosIds ) )
810 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700811 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700812 intentStates = []
813 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
814 count = 0
815 try:
816 for intent in json.loads( intents ):
817 # Iter through intents of a node
818 state = intent.get( 'state', None )
819 if "INSTALLED" not in state:
820 installedCheck = False
821 intentId = intent.get( 'id', None )
822 intentStates.append( ( intentId, state ) )
823 except ( ValueError, TypeError ):
824 main.log.exception( "Error parsing intents" )
825 # add submitted intents not in the store
826 tmplist = [ i for i, s in intentStates ]
827 for i in intentIds:
828 if i not in tmplist:
829 intentStates.append( ( i, " - " ) )
830 intentStates.sort()
831 for i, s in intentStates:
832 count += 1
833 main.log.info( "%-6s%-15s%-15s" %
834 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700835 self.topicsCheck( [ "org.onosproject.election" ] )
836 self.partitionsCheck()
837 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700838
Jon Hallca319892017-06-15 15:25:22 -0700839 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700840 """
841 Ping across added host intents
842 """
843 import json
844 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700845 assert main, "main not defined"
846 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700847 main.case( "Verify connectivity by sending traffic across Intents" )
848 main.caseExplanation = "Ping across added host intents to check " +\
849 "functionality and check the state of " +\
850 "the intent"
851
Jon Hallca319892017-06-15 15:25:22 -0700852 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700853 main.step( "Check Intent state" )
854 installedCheck = False
855 loopCount = 0
Jon Hall5d5876e2017-11-30 09:33:16 -0800856 while not installedCheck and loopCount < 90:
Devin Lim58046fa2017-07-05 16:55:00 -0700857 installedCheck = True
858 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700859 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700860 intentStates = []
861 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
862 count = 0
863 # Iter through intents of a node
864 try:
865 for intent in json.loads( intents ):
866 state = intent.get( 'state', None )
867 if "INSTALLED" not in state:
868 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700869 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700870 intentId = intent.get( 'id', None )
871 intentStates.append( ( intentId, state ) )
872 except ( ValueError, TypeError ):
873 main.log.exception( "Error parsing intents." )
874 # Print states
875 intentStates.sort()
876 for i, s in intentStates:
877 count += 1
878 main.log.info( "%-6s%-15s%-15s" %
879 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700880 if not installedCheck:
881 time.sleep( 1 )
882 loopCount += 1
883 utilities.assert_equals( expect=True, actual=installedCheck,
884 onpass="Intents are all INSTALLED",
885 onfail="Intents are not all in " +
886 "INSTALLED state" )
887
888 main.step( "Ping across added host intents" )
889 PingResult = main.TRUE
890 for i in range( 8, 18 ):
891 ping = main.Mininet1.pingHost( src="h" + str( i ),
892 target="h" + str( i + 10 ) )
893 PingResult = PingResult and ping
894 if ping == main.FALSE:
895 main.log.warn( "Ping failed between h" + str( i ) +
896 " and h" + str( i + 10 ) )
897 elif ping == main.TRUE:
898 main.log.info( "Ping test passed!" )
899 # Don't set PingResult or you'd override failures
900 if PingResult == main.FALSE:
901 main.log.error(
902 "Intents have not been installed correctly, pings failed." )
903 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700904 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700905 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700906 output = json.dumps( json.loads( tmpIntents ),
907 sort_keys=True,
908 indent=4,
909 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700910 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700911 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700912 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700913 utilities.assert_equals(
914 expect=main.TRUE,
915 actual=PingResult,
916 onpass="Intents have been installed correctly and pings work",
917 onfail="Intents have not been installed correctly, pings failed." )
918
919 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700920 topicsCheck = self.topicsCheck()
921 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700922 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700923 onfail="Some topics were lost" )
924 self.partitionsCheck()
925 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700926
927 if not installedCheck:
928 main.log.info( "Waiting 60 seconds to see if the state of " +
929 "intents change" )
930 time.sleep( 60 )
931 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700932 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700933 intentStates = []
934 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
935 count = 0
936 # Iter through intents of a node
937 try:
938 for intent in json.loads( intents ):
939 state = intent.get( 'state', None )
940 if "INSTALLED" not in state:
941 installedCheck = False
942 intentId = intent.get( 'id', None )
943 intentStates.append( ( intentId, state ) )
944 except ( ValueError, TypeError ):
945 main.log.exception( "Error parsing intents." )
946 intentStates.sort()
947 for i, s in intentStates:
948 count += 1
949 main.log.info( "%-6s%-15s%-15s" %
950 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700951 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700952
Devin Lim58046fa2017-07-05 16:55:00 -0700953 # Print flowrules
Devin Lime9f0ccf2017-08-11 17:25:12 -0700954 main.log.debug( onosCli.CLI.flows() )
Devin Lim58046fa2017-07-05 16:55:00 -0700955 main.step( "Wait a minute then ping again" )
956 # the wait is above
957 PingResult = main.TRUE
958 for i in range( 8, 18 ):
959 ping = main.Mininet1.pingHost( src="h" + str( i ),
960 target="h" + str( i + 10 ) )
961 PingResult = PingResult and ping
962 if ping == main.FALSE:
963 main.log.warn( "Ping failed between h" + str( i ) +
964 " and h" + str( i + 10 ) )
965 elif ping == main.TRUE:
966 main.log.info( "Ping test passed!" )
967 # Don't set PingResult or you'd override failures
968 if PingResult == main.FALSE:
969 main.log.error(
970 "Intents have not been installed correctly, pings failed." )
971 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700972 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700973 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700974 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700975 main.log.warn( json.dumps( json.loads( tmpIntents ),
976 sort_keys=True,
977 indent=4,
978 separators=( ',', ': ' ) ) )
979 except ( ValueError, TypeError ):
980 main.log.warn( repr( tmpIntents ) )
981 utilities.assert_equals(
982 expect=main.TRUE,
983 actual=PingResult,
984 onpass="Intents have been installed correctly and pings work",
985 onfail="Intents have not been installed correctly, pings failed." )
986
Devin Lim142b5342017-07-20 15:22:39 -0700987 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700988 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700989 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700990 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700991 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700992 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700993 actual=rolesNotNull,
994 onpass="Each device has a master",
995 onfail="Some devices don't have a master assigned" )
996
Devin Lim142b5342017-07-20 15:22:39 -0700997 def checkTheRole( self ):
998 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700999 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -07001000 consistentMastership = True
1001 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001002 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001003 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001004 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001005 main.log.error( "Error in getting " + node + " roles" )
1006 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001007 repr( ONOSMastership[ i ] ) )
1008 rolesResults = False
1009 utilities.assert_equals(
1010 expect=True,
1011 actual=rolesResults,
1012 onpass="No error in reading roles output",
1013 onfail="Error in reading roles from ONOS" )
1014
1015 main.step( "Check for consistency in roles from each controller" )
1016 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1017 main.log.info(
1018 "Switch roles are consistent across all ONOS nodes" )
1019 else:
1020 consistentMastership = False
1021 utilities.assert_equals(
1022 expect=True,
1023 actual=consistentMastership,
1024 onpass="Switch roles are consistent across all ONOS nodes",
1025 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -07001026 return ONOSMastership, rolesResults, consistentMastership
1027
1028 def checkingIntents( self ):
1029 main.step( "Get the intents from each controller" )
1030 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1031 intentsResults = True
1032 for i in range( len( ONOSIntents ) ):
1033 node = str( main.Cluster.active( i ) )
1034 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1035 main.log.error( "Error in getting " + node + " intents" )
1036 main.log.warn( node + " intents response: " +
1037 repr( ONOSIntents[ i ] ) )
1038 intentsResults = False
1039 utilities.assert_equals(
1040 expect=True,
1041 actual=intentsResults,
1042 onpass="No error in reading intents output",
1043 onfail="Error in reading intents from ONOS" )
1044 return ONOSIntents, intentsResults
1045
1046 def readingState( self, main ):
1047 """
1048 Reading state of ONOS
1049 """
1050 import json
Devin Lim142b5342017-07-20 15:22:39 -07001051 assert main, "main not defined"
1052 assert utilities.assert_equals, "utilities.assert_equals not defined"
1053 try:
1054 from tests.dependencies.topology import Topology
1055 except ImportError:
1056 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001057 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001058 try:
1059 main.topoRelated
1060 except ( NameError, AttributeError ):
1061 main.topoRelated = Topology()
1062 main.case( "Setting up and gathering data for current state" )
1063 # The general idea for this test case is to pull the state of
1064 # ( intents,flows, topology,... ) from each ONOS node
1065 # We can then compare them with each other and also with past states
1066
1067 global mastershipState
1068 mastershipState = '[]'
1069
1070 self.checkRoleNotNull()
1071
1072 main.step( "Get the Mastership of each switch from each controller" )
1073 mastershipCheck = main.FALSE
1074
1075 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001076
1077 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001078 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001079 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001080 try:
1081 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001082 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001083 json.dumps(
1084 json.loads( ONOSMastership[ i ] ),
1085 sort_keys=True,
1086 indent=4,
1087 separators=( ',', ': ' ) ) )
1088 except ( ValueError, TypeError ):
1089 main.log.warn( repr( ONOSMastership[ i ] ) )
1090 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001091 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001092 mastershipState = ONOSMastership[ 0 ]
1093
Devin Lim58046fa2017-07-05 16:55:00 -07001094 global intentState
1095 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001096 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001097 intentCheck = main.FALSE
1098 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001099
Devin Lim58046fa2017-07-05 16:55:00 -07001100 main.step( "Check for consistency in Intents from each controller" )
1101 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1102 main.log.info( "Intents are consistent across all ONOS " +
1103 "nodes" )
1104 else:
1105 consistentIntents = False
1106 main.log.error( "Intents not consistent" )
1107 utilities.assert_equals(
1108 expect=True,
1109 actual=consistentIntents,
1110 onpass="Intents are consistent across all ONOS nodes",
1111 onfail="ONOS nodes have different views of intents" )
1112
1113 if intentsResults:
1114 # Try to make it easy to figure out what is happening
1115 #
1116 # Intent ONOS1 ONOS2 ...
1117 # 0x01 INSTALLED INSTALLING
1118 # ... ... ...
1119 # ... ... ...
1120 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001121 for ctrl in main.Cluster.active():
1122 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001123 main.log.warn( title )
1124 # get all intent keys in the cluster
1125 keys = []
1126 try:
1127 # Get the set of all intent keys
1128 for nodeStr in ONOSIntents:
1129 node = json.loads( nodeStr )
1130 for intent in node:
1131 keys.append( intent.get( 'id' ) )
1132 keys = set( keys )
1133 # For each intent key, print the state on each node
1134 for key in keys:
1135 row = "%-13s" % key
1136 for nodeStr in ONOSIntents:
1137 node = json.loads( nodeStr )
1138 for intent in node:
1139 if intent.get( 'id', "Error" ) == key:
1140 row += "%-15s" % intent.get( 'state' )
1141 main.log.warn( row )
1142 # End of intent state table
1143 except ValueError as e:
1144 main.log.exception( e )
1145 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1146
1147 if intentsResults and not consistentIntents:
1148 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001149 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001150 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1151 sort_keys=True,
1152 indent=4,
1153 separators=( ',', ': ' ) ) )
1154 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001155 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001156 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001157 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001158 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1159 sort_keys=True,
1160 indent=4,
1161 separators=( ',', ': ' ) ) )
1162 else:
Jon Hallca319892017-06-15 15:25:22 -07001163 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001164 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001165 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001166 intentState = ONOSIntents[ 0 ]
1167
1168 main.step( "Get the flows from each controller" )
1169 global flowState
1170 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001171 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001172 ONOSFlowsJson = []
1173 flowCheck = main.FALSE
1174 consistentFlows = True
1175 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001176 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001177 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001178 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001179 main.log.error( "Error in getting " + node + " flows" )
1180 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001181 repr( ONOSFlows[ i ] ) )
1182 flowsResults = False
1183 ONOSFlowsJson.append( None )
1184 else:
1185 try:
1186 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1187 except ( ValueError, TypeError ):
1188 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001189 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001190 " response as json." )
1191 main.log.error( repr( ONOSFlows[ i ] ) )
1192 ONOSFlowsJson.append( None )
1193 flowsResults = False
1194 utilities.assert_equals(
1195 expect=True,
1196 actual=flowsResults,
1197 onpass="No error in reading flows output",
1198 onfail="Error in reading flows from ONOS" )
1199
1200 main.step( "Check for consistency in Flows from each controller" )
1201 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1202 if all( tmp ):
1203 main.log.info( "Flow count is consistent across all ONOS nodes" )
1204 else:
1205 consistentFlows = False
1206 utilities.assert_equals(
1207 expect=True,
1208 actual=consistentFlows,
1209 onpass="The flow count is consistent across all ONOS nodes",
1210 onfail="ONOS nodes have different flow counts" )
1211
1212 if flowsResults and not consistentFlows:
1213 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001214 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001215 try:
1216 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001217 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001218 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1219 indent=4, separators=( ',', ': ' ) ) )
1220 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001221 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001222 repr( ONOSFlows[ i ] ) )
1223 elif flowsResults and consistentFlows:
1224 flowCheck = main.TRUE
1225 flowState = ONOSFlows[ 0 ]
1226
1227 main.step( "Get the OF Table entries" )
1228 global flows
Jon Hallab611372018-02-21 15:26:05 -08001229 flows = {}
1230 for swName, swDetails in main.Mininet1.getSwitches().items():
1231 main.log.debug( repr( swName ) + repr( swDetails ) )
1232 flows[ swName ] = main.Mininet1.getFlowTable( swName, version="1.3", debug=False )
Devin Lim58046fa2017-07-05 16:55:00 -07001233 if flowCheck == main.FALSE:
1234 for table in flows:
1235 main.log.warn( table )
1236 # TODO: Compare switch flow tables with ONOS flow tables
1237
1238 main.step( "Start continuous pings" )
Jon Hallab611372018-02-21 15:26:05 -08001239 if main.params.get( 'PING', False ):
1240 # TODO: Make this more dynamic and less hardcoded, ie, # or ping pairs
1241 main.Mininet2.pingLong(
1242 src=main.params[ 'PING' ][ 'source1' ],
1243 target=main.params[ 'PING' ][ 'target1' ],
1244 pingTime=500 )
1245 main.Mininet2.pingLong(
1246 src=main.params[ 'PING' ][ 'source2' ],
1247 target=main.params[ 'PING' ][ 'target2' ],
1248 pingTime=500 )
1249 main.Mininet2.pingLong(
1250 src=main.params[ 'PING' ][ 'source3' ],
1251 target=main.params[ 'PING' ][ 'target3' ],
1252 pingTime=500 )
1253 main.Mininet2.pingLong(
1254 src=main.params[ 'PING' ][ 'source4' ],
1255 target=main.params[ 'PING' ][ 'target4' ],
1256 pingTime=500 )
1257 main.Mininet2.pingLong(
1258 src=main.params[ 'PING' ][ 'source5' ],
1259 target=main.params[ 'PING' ][ 'target5' ],
1260 pingTime=500 )
1261 main.Mininet2.pingLong(
1262 src=main.params[ 'PING' ][ 'source6' ],
1263 target=main.params[ 'PING' ][ 'target6' ],
1264 pingTime=500 )
1265 main.Mininet2.pingLong(
1266 src=main.params[ 'PING' ][ 'source7' ],
1267 target=main.params[ 'PING' ][ 'target7' ],
1268 pingTime=500 )
1269 main.Mininet2.pingLong(
1270 src=main.params[ 'PING' ][ 'source8' ],
1271 target=main.params[ 'PING' ][ 'target8' ],
1272 pingTime=500 )
1273 main.Mininet2.pingLong(
1274 src=main.params[ 'PING' ][ 'source9' ],
1275 target=main.params[ 'PING' ][ 'target9' ],
1276 pingTime=500 )
1277 main.Mininet2.pingLong(
1278 src=main.params[ 'PING' ][ 'source10' ],
1279 target=main.params[ 'PING' ][ 'target10' ],
1280 pingTime=500 )
Devin Lim58046fa2017-07-05 16:55:00 -07001281
1282 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001283 devices = main.topoRelated.getAll( "devices" )
1284 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1285 ports = main.topoRelated.getAll( "ports" )
1286 links = main.topoRelated.getAll( "links" )
1287 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001288 # Compare json objects for hosts and dataplane clusters
1289
1290 # hosts
1291 main.step( "Host view is consistent across ONOS nodes" )
1292 consistentHostsResult = main.TRUE
1293 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001294 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001295 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1296 if hosts[ controller ] == hosts[ 0 ]:
1297 continue
1298 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001299 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001300 controllerStr +
1301 " is inconsistent with ONOS1" )
1302 main.log.warn( repr( hosts[ controller ] ) )
1303 consistentHostsResult = main.FALSE
1304
1305 else:
Jon Hallca319892017-06-15 15:25:22 -07001306 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001307 controllerStr )
1308 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001309 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001310 " hosts response: " +
1311 repr( hosts[ controller ] ) )
1312 utilities.assert_equals(
1313 expect=main.TRUE,
1314 actual=consistentHostsResult,
1315 onpass="Hosts view is consistent across all ONOS nodes",
1316 onfail="ONOS nodes have different views of hosts" )
1317
1318 main.step( "Each host has an IP address" )
1319 ipResult = main.TRUE
1320 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001321 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001322 if hosts[ controller ]:
1323 for host in hosts[ controller ]:
1324 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001325 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001326 controllerStr + ": " + str( host ) )
1327 ipResult = main.FALSE
1328 utilities.assert_equals(
1329 expect=main.TRUE,
1330 actual=ipResult,
1331 onpass="The ips of the hosts aren't empty",
1332 onfail="The ip of at least one host is missing" )
1333
1334 # Strongly connected clusters of devices
1335 main.step( "Cluster view is consistent across ONOS nodes" )
1336 consistentClustersResult = main.TRUE
1337 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001338 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001339 if "Error" not in clusters[ controller ]:
1340 if clusters[ controller ] == clusters[ 0 ]:
1341 continue
1342 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001343 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001344 " is inconsistent with ONOS1" )
1345 consistentClustersResult = main.FALSE
1346
1347 else:
1348 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001349 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001350 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001351 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001352 " clusters response: " +
1353 repr( clusters[ controller ] ) )
1354 utilities.assert_equals(
1355 expect=main.TRUE,
1356 actual=consistentClustersResult,
1357 onpass="Clusters view is consistent across all ONOS nodes",
1358 onfail="ONOS nodes have different views of clusters" )
1359 if not consistentClustersResult:
1360 main.log.debug( clusters )
1361
1362 # there should always only be one cluster
1363 main.step( "Cluster view correct across ONOS nodes" )
1364 try:
1365 numClusters = len( json.loads( clusters[ 0 ] ) )
1366 except ( ValueError, TypeError ):
1367 main.log.exception( "Error parsing clusters[0]: " +
1368 repr( clusters[ 0 ] ) )
1369 numClusters = "ERROR"
1370 utilities.assert_equals(
1371 expect=1,
1372 actual=numClusters,
1373 onpass="ONOS shows 1 SCC",
1374 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1375
1376 main.step( "Comparing ONOS topology to MN" )
1377 devicesResults = main.TRUE
1378 linksResults = main.TRUE
1379 hostsResults = main.TRUE
1380 mnSwitches = main.Mininet1.getSwitches()
1381 mnLinks = main.Mininet1.getLinks()
1382 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001383 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001384 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001385 currentDevicesResult = main.topoRelated.compareDevicePort(
1386 main.Mininet1, controller,
1387 mnSwitches, devices, ports )
1388 utilities.assert_equals( expect=main.TRUE,
1389 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001390 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001391 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001392 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001393 " Switches view is incorrect" )
1394
1395 currentLinksResult = main.topoRelated.compareBase( links, controller,
1396 main.Mininet1.compareLinks,
1397 [ mnSwitches, mnLinks ] )
1398 utilities.assert_equals( expect=main.TRUE,
1399 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001400 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001401 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001402 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001403 " links view is incorrect" )
1404
1405 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1406 currentHostsResult = main.Mininet1.compareHosts(
1407 mnHosts,
1408 hosts[ controller ] )
1409 else:
1410 currentHostsResult = main.FALSE
1411 utilities.assert_equals( expect=main.TRUE,
1412 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001413 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001414 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001415 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001416 " hosts don't match Mininet" )
1417
1418 devicesResults = devicesResults and currentDevicesResult
1419 linksResults = linksResults and currentLinksResult
1420 hostsResults = hostsResults and currentHostsResult
1421
1422 main.step( "Device information is correct" )
1423 utilities.assert_equals(
1424 expect=main.TRUE,
1425 actual=devicesResults,
1426 onpass="Device information is correct",
1427 onfail="Device information is incorrect" )
1428
1429 main.step( "Links are correct" )
1430 utilities.assert_equals(
1431 expect=main.TRUE,
1432 actual=linksResults,
1433 onpass="Link are correct",
1434 onfail="Links are incorrect" )
1435
1436 main.step( "Hosts are correct" )
1437 utilities.assert_equals(
1438 expect=main.TRUE,
1439 actual=hostsResults,
1440 onpass="Hosts are correct",
1441 onfail="Hosts are incorrect" )
1442
1443 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001444 """
1445 Check for basic functionality with distributed primitives
1446 """
Jon Halle0f0b342017-04-18 11:43:47 -07001447 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001448 try:
1449 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001450 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001451 assert main.pCounterName, "main.pCounterName not defined"
1452 assert main.onosSetName, "main.onosSetName not defined"
1453 # NOTE: assert fails if value is 0/None/Empty/False
1454 try:
1455 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001456 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001457 main.log.error( "main.pCounterValue not defined, setting to 0" )
1458 main.pCounterValue = 0
1459 try:
1460 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001461 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001462 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001463 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001464 # Variables for the distributed primitives tests. These are local only
1465 addValue = "a"
1466 addAllValue = "a b c d e f"
1467 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001468 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001469 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001470 workQueueName = "TestON-Queue"
1471 workQueueCompleted = 0
1472 workQueueInProgress = 0
1473 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001474
1475 description = "Check for basic functionality with distributed " +\
1476 "primitives"
1477 main.case( description )
1478 main.caseExplanation = "Test the methods of the distributed " +\
1479 "primitives (counters and sets) throught the cli"
1480 # DISTRIBUTED ATOMIC COUNTERS
1481 # Partitioned counters
1482 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001483 pCounters = main.Cluster.command( "counterTestAddAndGet",
1484 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001485 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001486 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001487 main.pCounterValue += 1
1488 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001489 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001490 pCounterResults = True
1491 for i in addedPValues:
1492 tmpResult = i in pCounters
1493 pCounterResults = pCounterResults and tmpResult
1494 if not tmpResult:
1495 main.log.error( str( i ) + " is not in partitioned "
1496 "counter incremented results" )
1497 utilities.assert_equals( expect=True,
1498 actual=pCounterResults,
1499 onpass="Default counter incremented",
1500 onfail="Error incrementing default" +
1501 " counter" )
1502
1503 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001504 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1505 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001506 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001507 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001508 addedPValues.append( main.pCounterValue )
1509 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001510 # Check that counter incremented numController times
1511 pCounterResults = True
1512 for i in addedPValues:
1513 tmpResult = i in pCounters
1514 pCounterResults = pCounterResults and tmpResult
1515 if not tmpResult:
1516 main.log.error( str( i ) + " is not in partitioned "
1517 "counter incremented results" )
1518 utilities.assert_equals( expect=True,
1519 actual=pCounterResults,
1520 onpass="Default counter incremented",
1521 onfail="Error incrementing default" +
1522 " counter" )
1523
1524 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001525 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001526 utilities.assert_equals( expect=main.TRUE,
1527 actual=incrementCheck,
1528 onpass="Added counters are correct",
1529 onfail="Added counters are incorrect" )
1530
1531 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001532 pCounters = main.Cluster.command( "counterTestAddAndGet",
1533 args=[ main.pCounterName ],
1534 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001535 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001536 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001537 main.pCounterValue += -8
1538 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001539 # Check that counter incremented numController times
1540 pCounterResults = True
1541 for i in addedPValues:
1542 tmpResult = i in pCounters
1543 pCounterResults = pCounterResults and tmpResult
1544 if not tmpResult:
1545 main.log.error( str( i ) + " is not in partitioned "
1546 "counter incremented results" )
1547 utilities.assert_equals( expect=True,
1548 actual=pCounterResults,
1549 onpass="Default counter incremented",
1550 onfail="Error incrementing default" +
1551 " counter" )
1552
1553 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001554 pCounters = main.Cluster.command( "counterTestAddAndGet",
1555 args=[ main.pCounterName ],
1556 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001557 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001558 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001559 main.pCounterValue += 5
1560 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001561
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001562 # Check that counter incremented numController times
1563 pCounterResults = True
1564 for i in addedPValues:
1565 tmpResult = i in pCounters
1566 pCounterResults = pCounterResults and tmpResult
1567 if not tmpResult:
1568 main.log.error( str( i ) + " is not in partitioned "
1569 "counter incremented results" )
1570 utilities.assert_equals( expect=True,
1571 actual=pCounterResults,
1572 onpass="Default counter incremented",
1573 onfail="Error incrementing default" +
1574 " counter" )
1575
1576 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001577 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1578 args=[ main.pCounterName ],
1579 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001580 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001581 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001582 addedPValues.append( main.pCounterValue )
1583 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001584 # Check that counter incremented numController times
1585 pCounterResults = True
1586 for i in addedPValues:
1587 tmpResult = i in pCounters
1588 pCounterResults = pCounterResults and tmpResult
1589 if not tmpResult:
1590 main.log.error( str( i ) + " is not in partitioned "
1591 "counter incremented results" )
1592 utilities.assert_equals( expect=True,
1593 actual=pCounterResults,
1594 onpass="Default counter incremented",
1595 onfail="Error incrementing default" +
1596 " counter" )
1597
1598 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001599 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001600 utilities.assert_equals( expect=main.TRUE,
1601 actual=incrementCheck,
1602 onpass="Added counters are correct",
1603 onfail="Added counters are incorrect" )
1604
1605 # DISTRIBUTED SETS
1606 main.step( "Distributed Set get" )
1607 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001608 getResponses = main.Cluster.command( "setTestGet",
1609 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001610 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001611 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001612 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001613 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001614 current = set( getResponses[ i ] )
1615 if len( current ) == len( getResponses[ i ] ):
1616 # no repeats
1617 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001618 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001619 " has incorrect view" +
1620 " of set " + main.onosSetName + ":\n" +
1621 str( getResponses[ i ] ) )
1622 main.log.debug( "Expected: " + str( main.onosSet ) )
1623 main.log.debug( "Actual: " + str( current ) )
1624 getResults = main.FALSE
1625 else:
1626 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001627 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001628 " has repeat elements in" +
1629 " set " + main.onosSetName + ":\n" +
1630 str( getResponses[ i ] ) )
1631 getResults = main.FALSE
1632 elif getResponses[ i ] == main.ERROR:
1633 getResults = main.FALSE
1634 utilities.assert_equals( expect=main.TRUE,
1635 actual=getResults,
1636 onpass="Set elements are correct",
1637 onfail="Set elements are incorrect" )
1638
1639 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001640 sizeResponses = main.Cluster.command( "setTestSize",
1641 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001642 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001643 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001644 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001645 if size != sizeResponses[ i ]:
1646 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001647 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001648 " expected a size of " + str( size ) +
1649 " for set " + main.onosSetName +
1650 " but got " + str( sizeResponses[ i ] ) )
1651 utilities.assert_equals( expect=main.TRUE,
1652 actual=sizeResults,
1653 onpass="Set sizes are correct",
1654 onfail="Set sizes are incorrect" )
1655
1656 main.step( "Distributed Set add()" )
1657 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001658 addResponses = main.Cluster.command( "setTestAdd",
1659 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001660 # main.TRUE = successfully changed the set
1661 # main.FALSE = action resulted in no change in set
1662 # main.ERROR - Some error in executing the function
1663 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001664 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001665 if addResponses[ i ] == main.TRUE:
1666 # All is well
1667 pass
1668 elif addResponses[ i ] == main.FALSE:
1669 # Already in set, probably fine
1670 pass
1671 elif addResponses[ i ] == main.ERROR:
1672 # Error in execution
1673 addResults = main.FALSE
1674 else:
1675 # unexpected result
1676 addResults = main.FALSE
1677 if addResults != main.TRUE:
1678 main.log.error( "Error executing set add" )
1679
1680 # Check if set is still correct
1681 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001682 getResponses = main.Cluster.command( "setTestGet",
1683 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001684 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001685 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001686 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001687 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001688 current = set( getResponses[ i ] )
1689 if len( current ) == len( getResponses[ i ] ):
1690 # no repeats
1691 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001692 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001693 " of set " + main.onosSetName + ":\n" +
1694 str( getResponses[ i ] ) )
1695 main.log.debug( "Expected: " + str( main.onosSet ) )
1696 main.log.debug( "Actual: " + str( current ) )
1697 getResults = main.FALSE
1698 else:
1699 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001700 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001701 " set " + main.onosSetName + ":\n" +
1702 str( getResponses[ i ] ) )
1703 getResults = main.FALSE
1704 elif getResponses[ i ] == main.ERROR:
1705 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001706 sizeResponses = main.Cluster.command( "setTestSize",
1707 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001708 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001709 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001710 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001711 if size != sizeResponses[ i ]:
1712 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001713 main.log.error( node + " expected a size of " +
1714 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001715 " but got " + str( sizeResponses[ i ] ) )
1716 addResults = addResults and getResults and sizeResults
1717 utilities.assert_equals( expect=main.TRUE,
1718 actual=addResults,
1719 onpass="Set add correct",
1720 onfail="Set add was incorrect" )
1721
1722 main.step( "Distributed Set addAll()" )
1723 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001724 addResponses = main.Cluster.command( "setTestAdd",
1725 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001726 # main.TRUE = successfully changed the set
1727 # main.FALSE = action resulted in no change in set
1728 # main.ERROR - Some error in executing the function
1729 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001730 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001731 if addResponses[ i ] == main.TRUE:
1732 # All is well
1733 pass
1734 elif addResponses[ i ] == main.FALSE:
1735 # Already in set, probably fine
1736 pass
1737 elif addResponses[ i ] == main.ERROR:
1738 # Error in execution
1739 addAllResults = main.FALSE
1740 else:
1741 # unexpected result
1742 addAllResults = main.FALSE
1743 if addAllResults != main.TRUE:
1744 main.log.error( "Error executing set addAll" )
1745
1746 # Check if set is still correct
1747 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001748 getResponses = main.Cluster.command( "setTestGet",
1749 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001750 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001751 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001752 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001753 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001754 current = set( getResponses[ i ] )
1755 if len( current ) == len( getResponses[ i ] ):
1756 # no repeats
1757 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001758 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001759 " of set " + main.onosSetName + ":\n" +
1760 str( getResponses[ i ] ) )
1761 main.log.debug( "Expected: " + str( main.onosSet ) )
1762 main.log.debug( "Actual: " + str( current ) )
1763 getResults = main.FALSE
1764 else:
1765 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001766 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001767 " set " + main.onosSetName + ":\n" +
1768 str( getResponses[ i ] ) )
1769 getResults = main.FALSE
1770 elif getResponses[ i ] == main.ERROR:
1771 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001772 sizeResponses = main.Cluster.command( "setTestSize",
1773 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001774 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001775 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001776 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001777 if size != sizeResponses[ i ]:
1778 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001779 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001780 " for set " + main.onosSetName +
1781 " but got " + str( sizeResponses[ i ] ) )
1782 addAllResults = addAllResults and getResults and sizeResults
1783 utilities.assert_equals( expect=main.TRUE,
1784 actual=addAllResults,
1785 onpass="Set addAll correct",
1786 onfail="Set addAll was incorrect" )
1787
1788 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001789 containsResponses = main.Cluster.command( "setTestGet",
1790 args=[ main.onosSetName ],
1791 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001792 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001793 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001794 if containsResponses[ i ] == main.ERROR:
1795 containsResults = main.FALSE
1796 else:
1797 containsResults = containsResults and\
1798 containsResponses[ i ][ 1 ]
1799 utilities.assert_equals( expect=main.TRUE,
1800 actual=containsResults,
1801 onpass="Set contains is functional",
1802 onfail="Set contains failed" )
1803
1804 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001805 containsAllResponses = main.Cluster.command( "setTestGet",
1806 args=[ main.onosSetName ],
1807 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001808 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001809 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001810 if containsResponses[ i ] == main.ERROR:
1811 containsResults = main.FALSE
1812 else:
1813 containsResults = containsResults and\
1814 containsResponses[ i ][ 1 ]
1815 utilities.assert_equals( expect=main.TRUE,
1816 actual=containsAllResults,
1817 onpass="Set containsAll is functional",
1818 onfail="Set containsAll failed" )
1819
1820 main.step( "Distributed Set remove()" )
1821 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001822 removeResponses = main.Cluster.command( "setTestRemove",
1823 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001824 # main.TRUE = successfully changed the set
1825 # main.FALSE = action resulted in no change in set
1826 # main.ERROR - Some error in executing the function
1827 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001828 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001829 if removeResponses[ i ] == main.TRUE:
1830 # All is well
1831 pass
1832 elif removeResponses[ i ] == main.FALSE:
1833 # not in set, probably fine
1834 pass
1835 elif removeResponses[ i ] == main.ERROR:
1836 # Error in execution
1837 removeResults = main.FALSE
1838 else:
1839 # unexpected result
1840 removeResults = main.FALSE
1841 if removeResults != main.TRUE:
1842 main.log.error( "Error executing set remove" )
1843
1844 # Check if set is still correct
1845 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001846 getResponses = main.Cluster.command( "setTestGet",
1847 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001848 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001849 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001850 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001851 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001852 current = set( getResponses[ i ] )
1853 if len( current ) == len( getResponses[ i ] ):
1854 # no repeats
1855 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001856 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001857 " of set " + main.onosSetName + ":\n" +
1858 str( getResponses[ i ] ) )
1859 main.log.debug( "Expected: " + str( main.onosSet ) )
1860 main.log.debug( "Actual: " + str( current ) )
1861 getResults = main.FALSE
1862 else:
1863 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001864 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001865 " set " + main.onosSetName + ":\n" +
1866 str( getResponses[ i ] ) )
1867 getResults = main.FALSE
1868 elif getResponses[ i ] == main.ERROR:
1869 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001870 sizeResponses = main.Cluster.command( "setTestSize",
1871 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001872 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001873 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001874 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001875 if size != sizeResponses[ i ]:
1876 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001877 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001878 " for set " + main.onosSetName +
1879 " but got " + str( sizeResponses[ i ] ) )
1880 removeResults = removeResults and getResults and sizeResults
1881 utilities.assert_equals( expect=main.TRUE,
1882 actual=removeResults,
1883 onpass="Set remove correct",
1884 onfail="Set remove was incorrect" )
1885
1886 main.step( "Distributed Set removeAll()" )
1887 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001888 removeAllResponses = main.Cluster.command( "setTestRemove",
1889 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001890 # main.TRUE = successfully changed the set
1891 # main.FALSE = action resulted in no change in set
1892 # main.ERROR - Some error in executing the function
1893 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001894 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001895 if removeAllResponses[ i ] == main.TRUE:
1896 # All is well
1897 pass
1898 elif removeAllResponses[ i ] == main.FALSE:
1899 # not in set, probably fine
1900 pass
1901 elif removeAllResponses[ i ] == main.ERROR:
1902 # Error in execution
1903 removeAllResults = main.FALSE
1904 else:
1905 # unexpected result
1906 removeAllResults = main.FALSE
1907 if removeAllResults != main.TRUE:
1908 main.log.error( "Error executing set removeAll" )
1909
1910 # Check if set is still correct
1911 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001912 getResponses = main.Cluster.command( "setTestGet",
1913 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001914 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001915 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001916 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001917 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001918 current = set( getResponses[ i ] )
1919 if len( current ) == len( getResponses[ i ] ):
1920 # no repeats
1921 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001922 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001923 " of set " + main.onosSetName + ":\n" +
1924 str( getResponses[ i ] ) )
1925 main.log.debug( "Expected: " + str( main.onosSet ) )
1926 main.log.debug( "Actual: " + str( current ) )
1927 getResults = main.FALSE
1928 else:
1929 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001930 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001931 " set " + main.onosSetName + ":\n" +
1932 str( getResponses[ i ] ) )
1933 getResults = main.FALSE
1934 elif getResponses[ i ] == main.ERROR:
1935 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001936 sizeResponses = main.Cluster.command( "setTestSize",
1937 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001938 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001939 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001940 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001941 if size != sizeResponses[ i ]:
1942 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001943 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001944 " for set " + main.onosSetName +
1945 " but got " + str( sizeResponses[ i ] ) )
1946 removeAllResults = removeAllResults and getResults and sizeResults
1947 utilities.assert_equals( expect=main.TRUE,
1948 actual=removeAllResults,
1949 onpass="Set removeAll correct",
1950 onfail="Set removeAll was incorrect" )
1951
1952 main.step( "Distributed Set addAll()" )
1953 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001954 addResponses = main.Cluster.command( "setTestAdd",
1955 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001956 # main.TRUE = successfully changed the set
1957 # main.FALSE = action resulted in no change in set
1958 # main.ERROR - Some error in executing the function
1959 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001960 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001961 if addResponses[ i ] == main.TRUE:
1962 # All is well
1963 pass
1964 elif addResponses[ i ] == main.FALSE:
1965 # Already in set, probably fine
1966 pass
1967 elif addResponses[ i ] == main.ERROR:
1968 # Error in execution
1969 addAllResults = main.FALSE
1970 else:
1971 # unexpected result
1972 addAllResults = main.FALSE
1973 if addAllResults != main.TRUE:
1974 main.log.error( "Error executing set addAll" )
1975
1976 # Check if set is still correct
1977 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001978 getResponses = main.Cluster.command( "setTestGet",
1979 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001980 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001981 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001982 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001983 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001984 current = set( getResponses[ i ] )
1985 if len( current ) == len( getResponses[ i ] ):
1986 # no repeats
1987 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001988 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001989 " of set " + main.onosSetName + ":\n" +
1990 str( getResponses[ i ] ) )
1991 main.log.debug( "Expected: " + str( main.onosSet ) )
1992 main.log.debug( "Actual: " + str( current ) )
1993 getResults = main.FALSE
1994 else:
1995 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001996 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001997 " set " + main.onosSetName + ":\n" +
1998 str( getResponses[ i ] ) )
1999 getResults = main.FALSE
2000 elif getResponses[ i ] == main.ERROR:
2001 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002002 sizeResponses = main.Cluster.command( "setTestSize",
2003 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002004 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002005 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002006 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002007 if size != sizeResponses[ i ]:
2008 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002009 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002010 " for set " + main.onosSetName +
2011 " but got " + str( sizeResponses[ i ] ) )
2012 addAllResults = addAllResults and getResults and sizeResults
2013 utilities.assert_equals( expect=main.TRUE,
2014 actual=addAllResults,
2015 onpass="Set addAll correct",
2016 onfail="Set addAll was incorrect" )
2017
2018 main.step( "Distributed Set clear()" )
2019 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002020 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07002021 args=[ main.onosSetName, " " ], # Values doesn't matter
2022 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002023 # main.TRUE = successfully changed the set
2024 # main.FALSE = action resulted in no change in set
2025 # main.ERROR - Some error in executing the function
2026 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002027 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002028 if clearResponses[ i ] == main.TRUE:
2029 # All is well
2030 pass
2031 elif clearResponses[ i ] == main.FALSE:
2032 # Nothing set, probably fine
2033 pass
2034 elif clearResponses[ i ] == main.ERROR:
2035 # Error in execution
2036 clearResults = main.FALSE
2037 else:
2038 # unexpected result
2039 clearResults = main.FALSE
2040 if clearResults != main.TRUE:
2041 main.log.error( "Error executing set clear" )
2042
2043 # Check if set is still correct
2044 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002045 getResponses = main.Cluster.command( "setTestGet",
2046 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002047 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002048 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002049 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002050 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002051 current = set( getResponses[ i ] )
2052 if len( current ) == len( getResponses[ i ] ):
2053 # no repeats
2054 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002055 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002056 " of set " + main.onosSetName + ":\n" +
2057 str( getResponses[ i ] ) )
2058 main.log.debug( "Expected: " + str( main.onosSet ) )
2059 main.log.debug( "Actual: " + str( current ) )
2060 getResults = main.FALSE
2061 else:
2062 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002063 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002064 " set " + main.onosSetName + ":\n" +
2065 str( getResponses[ i ] ) )
2066 getResults = main.FALSE
2067 elif getResponses[ i ] == main.ERROR:
2068 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002069 sizeResponses = main.Cluster.command( "setTestSize",
2070 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002071 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002072 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002073 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002074 if size != sizeResponses[ i ]:
2075 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002076 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002077 " for set " + main.onosSetName +
2078 " but got " + str( sizeResponses[ i ] ) )
2079 clearResults = clearResults and getResults and sizeResults
2080 utilities.assert_equals( expect=main.TRUE,
2081 actual=clearResults,
2082 onpass="Set clear correct",
2083 onfail="Set clear was incorrect" )
2084
2085 main.step( "Distributed Set addAll()" )
2086 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002087 addResponses = main.Cluster.command( "setTestAdd",
2088 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002089 # main.TRUE = successfully changed the set
2090 # main.FALSE = action resulted in no change in set
2091 # main.ERROR - Some error in executing the function
2092 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002093 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002094 if addResponses[ i ] == main.TRUE:
2095 # All is well
2096 pass
2097 elif addResponses[ i ] == main.FALSE:
2098 # Already in set, probably fine
2099 pass
2100 elif addResponses[ i ] == main.ERROR:
2101 # Error in execution
2102 addAllResults = main.FALSE
2103 else:
2104 # unexpected result
2105 addAllResults = main.FALSE
2106 if addAllResults != main.TRUE:
2107 main.log.error( "Error executing set addAll" )
2108
2109 # Check if set is still correct
2110 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002111 getResponses = main.Cluster.command( "setTestGet",
2112 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002113 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002114 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002115 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002116 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002117 current = set( getResponses[ i ] )
2118 if len( current ) == len( getResponses[ i ] ):
2119 # no repeats
2120 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002121 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002122 " of set " + main.onosSetName + ":\n" +
2123 str( getResponses[ i ] ) )
2124 main.log.debug( "Expected: " + str( main.onosSet ) )
2125 main.log.debug( "Actual: " + str( current ) )
2126 getResults = main.FALSE
2127 else:
2128 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002129 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002130 " set " + main.onosSetName + ":\n" +
2131 str( getResponses[ i ] ) )
2132 getResults = main.FALSE
2133 elif getResponses[ i ] == main.ERROR:
2134 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002135 sizeResponses = main.Cluster.command( "setTestSize",
2136 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002137 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002138 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002139 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002140 if size != sizeResponses[ i ]:
2141 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002142 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002143 " for set " + main.onosSetName +
2144 " but got " + str( sizeResponses[ i ] ) )
2145 addAllResults = addAllResults and getResults and sizeResults
2146 utilities.assert_equals( expect=main.TRUE,
2147 actual=addAllResults,
2148 onpass="Set addAll correct",
2149 onfail="Set addAll was incorrect" )
2150
2151 main.step( "Distributed Set retain()" )
2152 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002153 retainResponses = main.Cluster.command( "setTestRemove",
2154 args=[ main.onosSetName, retainValue ],
2155 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002156 # main.TRUE = successfully changed the set
2157 # main.FALSE = action resulted in no change in set
2158 # main.ERROR - Some error in executing the function
2159 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002160 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002161 if retainResponses[ i ] == main.TRUE:
2162 # All is well
2163 pass
2164 elif retainResponses[ i ] == main.FALSE:
2165 # Already in set, probably fine
2166 pass
2167 elif retainResponses[ i ] == main.ERROR:
2168 # Error in execution
2169 retainResults = main.FALSE
2170 else:
2171 # unexpected result
2172 retainResults = main.FALSE
2173 if retainResults != main.TRUE:
2174 main.log.error( "Error executing set retain" )
2175
2176 # Check if set is still correct
2177 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002178 getResponses = main.Cluster.command( "setTestGet",
2179 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002180 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002181 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002182 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002183 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002184 current = set( getResponses[ i ] )
2185 if len( current ) == len( getResponses[ i ] ):
2186 # no repeats
2187 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002188 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002189 " of set " + main.onosSetName + ":\n" +
2190 str( getResponses[ i ] ) )
2191 main.log.debug( "Expected: " + str( main.onosSet ) )
2192 main.log.debug( "Actual: " + str( current ) )
2193 getResults = main.FALSE
2194 else:
2195 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002196 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002197 " set " + main.onosSetName + ":\n" +
2198 str( getResponses[ i ] ) )
2199 getResults = main.FALSE
2200 elif getResponses[ i ] == main.ERROR:
2201 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002202 sizeResponses = main.Cluster.command( "setTestSize",
2203 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002204 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002205 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002206 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002207 if size != sizeResponses[ i ]:
2208 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002209 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002210 str( size ) + " for set " + main.onosSetName +
2211 " but got " + str( sizeResponses[ i ] ) )
2212 retainResults = retainResults and getResults and sizeResults
2213 utilities.assert_equals( expect=main.TRUE,
2214 actual=retainResults,
2215 onpass="Set retain correct",
2216 onfail="Set retain was incorrect" )
2217
2218 # Transactional maps
2219 main.step( "Partitioned Transactional maps put" )
2220 tMapValue = "Testing"
2221 numKeys = 100
2222 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002223 ctrl = main.Cluster.next()
2224 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002225 if putResponses and len( putResponses ) == 100:
2226 for i in putResponses:
2227 if putResponses[ i ][ 'value' ] != tMapValue:
2228 putResult = False
2229 else:
2230 putResult = False
2231 if not putResult:
2232 main.log.debug( "Put response values: " + str( putResponses ) )
2233 utilities.assert_equals( expect=True,
2234 actual=putResult,
2235 onpass="Partitioned Transactional Map put successful",
2236 onfail="Partitioned Transactional Map put values are incorrect" )
2237
2238 main.step( "Partitioned Transactional maps get" )
2239 # FIXME: is this sleep needed?
2240 time.sleep( 5 )
2241
2242 getCheck = True
2243 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002244 getResponses = main.Cluster.command( "transactionalMapGet",
2245 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002246 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002247 for node in getResponses:
2248 if node != tMapValue:
2249 valueCheck = False
2250 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002251 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002252 main.log.warn( getResponses )
2253 getCheck = getCheck and valueCheck
2254 utilities.assert_equals( expect=True,
2255 actual=getCheck,
2256 onpass="Partitioned Transactional Map get values were correct",
2257 onfail="Partitioned Transactional Map values incorrect" )
2258
2259 # DISTRIBUTED ATOMIC VALUE
2260 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002261 getValues = main.Cluster.command( "valueTestGet",
2262 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002263 main.log.debug( getValues )
2264 # Check the results
2265 atomicValueGetResult = True
2266 expected = valueValue if valueValue is not None else "null"
2267 main.log.debug( "Checking for value of " + expected )
2268 for i in getValues:
2269 if i != expected:
2270 atomicValueGetResult = False
2271 utilities.assert_equals( expect=True,
2272 actual=atomicValueGetResult,
2273 onpass="Atomic Value get successful",
2274 onfail="Error getting atomic Value " +
2275 str( valueValue ) + ", found: " +
2276 str( getValues ) )
2277
2278 main.step( "Atomic Value set()" )
2279 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002280 setValues = main.Cluster.command( "valueTestSet",
2281 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002282 main.log.debug( setValues )
2283 # Check the results
2284 atomicValueSetResults = True
2285 for i in setValues:
2286 if i != main.TRUE:
2287 atomicValueSetResults = False
2288 utilities.assert_equals( expect=True,
2289 actual=atomicValueSetResults,
2290 onpass="Atomic Value set successful",
2291 onfail="Error setting atomic Value" +
2292 str( setValues ) )
2293
2294 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002295 getValues = main.Cluster.command( "valueTestGet",
2296 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002297 main.log.debug( getValues )
2298 # Check the results
2299 atomicValueGetResult = True
2300 expected = valueValue if valueValue is not None else "null"
2301 main.log.debug( "Checking for value of " + expected )
2302 for i in getValues:
2303 if i != expected:
2304 atomicValueGetResult = False
2305 utilities.assert_equals( expect=True,
2306 actual=atomicValueGetResult,
2307 onpass="Atomic Value get successful",
2308 onfail="Error getting atomic Value " +
2309 str( valueValue ) + ", found: " +
2310 str( getValues ) )
2311
2312 main.step( "Atomic Value compareAndSet()" )
2313 oldValue = valueValue
2314 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002315 ctrl = main.Cluster.next()
2316 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002317 main.log.debug( CASValue )
2318 utilities.assert_equals( expect=main.TRUE,
2319 actual=CASValue,
2320 onpass="Atomic Value comapreAndSet successful",
2321 onfail="Error setting atomic Value:" +
2322 str( CASValue ) )
2323
2324 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002325 getValues = main.Cluster.command( "valueTestGet",
2326 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002327 main.log.debug( getValues )
2328 # Check the results
2329 atomicValueGetResult = True
2330 expected = valueValue if valueValue is not None else "null"
2331 main.log.debug( "Checking for value of " + expected )
2332 for i in getValues:
2333 if i != expected:
2334 atomicValueGetResult = False
2335 utilities.assert_equals( expect=True,
2336 actual=atomicValueGetResult,
2337 onpass="Atomic Value get successful",
2338 onfail="Error getting atomic Value " +
2339 str( valueValue ) + ", found: " +
2340 str( getValues ) )
2341
2342 main.step( "Atomic Value getAndSet()" )
2343 oldValue = valueValue
2344 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002345 ctrl = main.Cluster.next()
2346 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002347 main.log.debug( GASValue )
2348 expected = oldValue if oldValue is not None else "null"
2349 utilities.assert_equals( expect=expected,
2350 actual=GASValue,
2351 onpass="Atomic Value GAS successful",
2352 onfail="Error with GetAndSet atomic Value: expected " +
2353 str( expected ) + ", found: " +
2354 str( GASValue ) )
2355
2356 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002357 getValues = main.Cluster.command( "valueTestGet",
2358 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002359 main.log.debug( getValues )
2360 # Check the results
2361 atomicValueGetResult = True
2362 expected = valueValue if valueValue is not None else "null"
2363 main.log.debug( "Checking for value of " + expected )
2364 for i in getValues:
2365 if i != expected:
2366 atomicValueGetResult = False
2367 utilities.assert_equals( expect=True,
2368 actual=atomicValueGetResult,
2369 onpass="Atomic Value get successful",
2370 onfail="Error getting atomic Value: expected " +
2371 str( valueValue ) + ", found: " +
2372 str( getValues ) )
2373
2374 main.step( "Atomic Value destory()" )
2375 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002376 ctrl = main.Cluster.next()
2377 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002378 main.log.debug( destroyResult )
2379 # Check the results
2380 utilities.assert_equals( expect=main.TRUE,
2381 actual=destroyResult,
2382 onpass="Atomic Value destroy successful",
2383 onfail="Error destroying atomic Value" )
2384
2385 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002386 getValues = main.Cluster.command( "valueTestGet",
2387 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002388 main.log.debug( getValues )
2389 # Check the results
2390 atomicValueGetResult = True
2391 expected = valueValue if valueValue is not None else "null"
2392 main.log.debug( "Checking for value of " + expected )
2393 for i in getValues:
2394 if i != expected:
2395 atomicValueGetResult = False
2396 utilities.assert_equals( expect=True,
2397 actual=atomicValueGetResult,
2398 onpass="Atomic Value get successful",
2399 onfail="Error getting atomic Value " +
2400 str( valueValue ) + ", found: " +
2401 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002402
2403 # WORK QUEUES
2404 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002405 ctrl = main.Cluster.next()
2406 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002407 workQueuePending += 1
2408 main.log.debug( addResult )
2409 # Check the results
2410 utilities.assert_equals( expect=main.TRUE,
2411 actual=addResult,
2412 onpass="Work Queue add successful",
2413 onfail="Error adding to Work Queue" )
2414
2415 main.step( "Check the work queue stats" )
2416 statsResults = self.workQueueStatsCheck( workQueueName,
2417 workQueueCompleted,
2418 workQueueInProgress,
2419 workQueuePending )
2420 utilities.assert_equals( expect=True,
2421 actual=statsResults,
2422 onpass="Work Queue stats correct",
2423 onfail="Work Queue stats incorrect " )
2424
2425 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002426 ctrl = main.Cluster.next()
2427 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002428 workQueuePending += 2
2429 main.log.debug( addMultipleResult )
2430 # Check the results
2431 utilities.assert_equals( expect=main.TRUE,
2432 actual=addMultipleResult,
2433 onpass="Work Queue add multiple successful",
2434 onfail="Error adding multiple items to Work Queue" )
2435
2436 main.step( "Check the work queue stats" )
2437 statsResults = self.workQueueStatsCheck( workQueueName,
2438 workQueueCompleted,
2439 workQueueInProgress,
2440 workQueuePending )
2441 utilities.assert_equals( expect=True,
2442 actual=statsResults,
2443 onpass="Work Queue stats correct",
2444 onfail="Work Queue stats incorrect " )
2445
2446 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002447 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002448 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002449 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002450 workQueuePending -= number
2451 workQueueCompleted += number
2452 main.log.debug( take1Result )
2453 # Check the results
2454 utilities.assert_equals( expect=main.TRUE,
2455 actual=take1Result,
2456 onpass="Work Queue takeAndComplete 1 successful",
2457 onfail="Error taking 1 from Work Queue" )
2458
2459 main.step( "Check the work queue stats" )
2460 statsResults = self.workQueueStatsCheck( workQueueName,
2461 workQueueCompleted,
2462 workQueueInProgress,
2463 workQueuePending )
2464 utilities.assert_equals( expect=True,
2465 actual=statsResults,
2466 onpass="Work Queue stats correct",
2467 onfail="Work Queue stats incorrect " )
2468
2469 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002470 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002471 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002472 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002473 workQueuePending -= number
2474 workQueueCompleted += number
2475 main.log.debug( take2Result )
2476 # Check the results
2477 utilities.assert_equals( expect=main.TRUE,
2478 actual=take2Result,
2479 onpass="Work Queue takeAndComplete 2 successful",
2480 onfail="Error taking 2 from Work Queue" )
2481
2482 main.step( "Check the work queue stats" )
2483 statsResults = self.workQueueStatsCheck( workQueueName,
2484 workQueueCompleted,
2485 workQueueInProgress,
2486 workQueuePending )
2487 utilities.assert_equals( expect=True,
2488 actual=statsResults,
2489 onpass="Work Queue stats correct",
2490 onfail="Work Queue stats incorrect " )
2491
2492 main.step( "Work Queue destroy()" )
2493 valueValue = None
2494 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002495 ctrl = main.Cluster.next()
2496 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002497 workQueueCompleted = 0
2498 workQueueInProgress = 0
2499 workQueuePending = 0
2500 main.log.debug( destroyResult )
2501 # Check the results
2502 utilities.assert_equals( expect=main.TRUE,
2503 actual=destroyResult,
2504 onpass="Work Queue destroy successful",
2505 onfail="Error destroying Work Queue" )
2506
2507 main.step( "Check the work queue stats" )
2508 statsResults = self.workQueueStatsCheck( workQueueName,
2509 workQueueCompleted,
2510 workQueueInProgress,
2511 workQueuePending )
2512 utilities.assert_equals( expect=True,
2513 actual=statsResults,
2514 onpass="Work Queue stats correct",
2515 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002516 except Exception as e:
2517 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002518
2519 def cleanUp( self, main ):
2520 """
2521 Clean up
2522 """
Devin Lim58046fa2017-07-05 16:55:00 -07002523 assert main, "main not defined"
2524 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002525
2526 # printing colors to terminal
2527 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2528 'blue': '\033[94m', 'green': '\033[92m',
2529 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002530
Devin Lim58046fa2017-07-05 16:55:00 -07002531 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002532
2533 main.step( "Checking raft log size" )
2534 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2535 # get compacted periodically
2536 logCheck = main.Cluster.checkPartitionSize()
2537 utilities.assert_equals( expect=True, actual=logCheck,
2538 onpass="Raft log size is not too big",
2539 onfail="Raft logs grew too big" )
2540
Devin Lim58046fa2017-07-05 16:55:00 -07002541 main.step( "Killing tcpdumps" )
2542 main.Mininet2.stopTcpdump()
2543
2544 testname = main.TEST
2545 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2546 main.step( "Copying MN pcap and ONOS log files to test station" )
2547 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2548 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2549 # NOTE: MN Pcap file is being saved to logdir.
2550 # We scp this file as MN and TestON aren't necessarily the same vm
2551
2552 # FIXME: To be replaced with a Jenkin's post script
2553 # TODO: Load these from params
2554 # NOTE: must end in /
2555 logFolder = "/opt/onos/log/"
2556 logFiles = [ "karaf.log", "karaf.log.1" ]
2557 # NOTE: must end in /
2558 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002559 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002560 dstName = main.logdir + "/" + ctrl.name + "-" + f
2561 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002562 logFolder + f, dstName )
2563 # std*.log's
2564 # NOTE: must end in /
2565 logFolder = "/opt/onos/var/"
2566 logFiles = [ "stderr.log", "stdout.log" ]
2567 # NOTE: must end in /
2568 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002569 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002570 dstName = main.logdir + "/" + ctrl.name + "-" + f
2571 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002572 logFolder + f, dstName )
2573 else:
2574 main.log.debug( "skipping saving log files" )
2575
Jon Hall5d5876e2017-11-30 09:33:16 -08002576 main.step( "Checking ONOS Logs for errors" )
2577 for ctrl in main.Cluster.runningNodes:
2578 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2579 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
2580
Devin Lim58046fa2017-07-05 16:55:00 -07002581 main.step( "Stopping Mininet" )
2582 mnResult = main.Mininet1.stopNet()
2583 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2584 onpass="Mininet stopped",
2585 onfail="MN cleanup NOT successful" )
2586
Devin Lim58046fa2017-07-05 16:55:00 -07002587 try:
2588 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2589 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2590 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2591 timerLog.close()
2592 except NameError as e:
2593 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002594
Devin Lim58046fa2017-07-05 16:55:00 -07002595 def assignMastership( self, main ):
2596 """
2597 Assign mastership to controllers
2598 """
2599 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002600 assert main, "main not defined"
2601 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002602
2603 main.case( "Assigning Controller roles for switches" )
2604 main.caseExplanation = "Check that ONOS is connected to each " +\
2605 "device. Then manually assign" +\
2606 " mastership to specific ONOS nodes using" +\
2607 " 'device-role'"
2608 main.step( "Assign mastership of switches to specific controllers" )
2609 # Manually assign mastership to the controller we want
2610 roleCall = main.TRUE
2611
2612 ipList = []
2613 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002614 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002615 try:
2616 # Assign mastership to specific controllers. This assignment was
2617 # determined for a 7 node cluser, but will work with any sized
2618 # cluster
2619 for i in range( 1, 29 ): # switches 1 through 28
2620 # set up correct variables:
2621 if i == 1:
2622 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002623 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002624 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2625 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002626 c = 1 % main.Cluster.numCtrls
2627 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002628 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2629 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002630 c = 1 % main.Cluster.numCtrls
2631 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002632 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2633 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002634 c = 3 % main.Cluster.numCtrls
2635 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002636 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2637 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002638 c = 2 % main.Cluster.numCtrls
2639 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002640 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2641 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002642 c = 2 % main.Cluster.numCtrls
2643 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002644 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2645 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002646 c = 5 % main.Cluster.numCtrls
2647 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002648 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2649 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002650 c = 4 % main.Cluster.numCtrls
2651 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002652 dpid = '3' + str( i ).zfill( 3 )
2653 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2654 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002655 c = 6 % main.Cluster.numCtrls
2656 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002657 dpid = '6' + str( i ).zfill( 3 )
2658 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2659 elif i == 28:
2660 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002661 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002662 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2663 else:
2664 main.log.error( "You didn't write an else statement for " +
2665 "switch s" + str( i ) )
2666 roleCall = main.FALSE
2667 # Assign switch
2668 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2669 # TODO: make this controller dynamic
2670 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2671 ipList.append( ip )
2672 deviceList.append( deviceId )
2673 except ( AttributeError, AssertionError ):
2674 main.log.exception( "Something is wrong with ONOS device view" )
2675 main.log.info( onosCli.devices() )
2676 utilities.assert_equals(
2677 expect=main.TRUE,
2678 actual=roleCall,
2679 onpass="Re-assigned switch mastership to designated controller",
2680 onfail="Something wrong with deviceRole calls" )
2681
2682 main.step( "Check mastership was correctly assigned" )
2683 roleCheck = main.TRUE
2684 # NOTE: This is due to the fact that device mastership change is not
2685 # atomic and is actually a multi step process
2686 time.sleep( 5 )
2687 for i in range( len( ipList ) ):
2688 ip = ipList[ i ]
2689 deviceId = deviceList[ i ]
2690 # Check assignment
2691 master = onosCli.getRole( deviceId ).get( 'master' )
2692 if ip in master:
2693 roleCheck = roleCheck and main.TRUE
2694 else:
2695 roleCheck = roleCheck and main.FALSE
2696 main.log.error( "Error, controller " + ip + " is not" +
2697 " master " + "of device " +
2698 str( deviceId ) + ". Master is " +
2699 repr( master ) + "." )
2700 utilities.assert_equals(
2701 expect=main.TRUE,
2702 actual=roleCheck,
2703 onpass="Switches were successfully reassigned to designated " +
2704 "controller",
2705 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002706
    def bringUpStoppedNodes( self, main ):
        """
        Restart the ONOS nodes previously stopped ( listed in main.kill )
        and verify the cluster recovers.

        Steps: start each killed node, poll until each reports up ( at most
        10 rounds ), restart their CLI sessions, run the cluster-wide node
        checks and common state checks, and re-run for the leadership
        election topic on each restarted node.  The total recovery time is
        recorded in main.restartTime.  Exits the test if the node checks
        still fail after the restart.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.kill, "main.kill not defined"
        main.case( "Restart minority of ONOS nodes" )

        main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
        # Aggregate start results across all killed nodes; one failure -> FALSE
        startResults = main.TRUE
        restartTime = time.time()
        for ctrl in main.kill:
            startResults = startResults and\
                           ctrl.onosStart( ctrl.ipAddress )
        utilities.assert_equals( expect=main.TRUE, actual=startResults,
                                 onpass="ONOS nodes started successfully",
                                 onfail="ONOS nodes NOT successfully started" )

        main.step( "Checking if ONOS is up yet" )
        # Poll isup() on every restarted node until all pass, up to 10 rounds
        count = 0
        onosIsupResult = main.FALSE
        while onosIsupResult == main.FALSE and count < 10:
            onosIsupResult = main.TRUE
            for ctrl in main.kill:
                onosIsupResult = onosIsupResult and\
                                 ctrl.isup( ctrl.ipAddress )
            count = count + 1
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS restarted successfully",
                                 onfail="ONOS restart NOT successful" )

        main.step( "Restarting ONOS CLI" )
        cliResults = main.TRUE
        for ctrl in main.kill:
            cliResults = cliResults and\
                         ctrl.startOnosCli( ctrl.ipAddress )
            # Mark the node as active again in the cluster model
            ctrl.active = True
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS node(s) restarted",
                                 onfail="ONOS node(s) did not restart" )

        # Grab the time of restart so we can have some idea of average time
        main.restartTime = time.time() - restartTime
        main.log.debug( "Restart time: " + str( main.restartTime ) )
        # TODO: Make this configurable. Also, we are breaking the above timer
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       sleep=15,
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump non-ACTIVE karaf components from each node to aid debugging
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        self.commonChecks()

        main.step( "Rerun for election on the node(s) that were killed" )
        runResults = main.TRUE
        for ctrl in main.kill:
            runResults = runResults and\
                         ctrl.electionTestRun()
        utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                 onpass="ONOS nodes reran for election topic",
                                 onfail="Error rerunning for election" )
2782
2783 def upgradeNodes( self, main ):
2784 """
2785 Reinstall some nodes with an upgraded version.
2786
2787 This will reinstall nodes in main.kill with an upgraded version.
2788 """
2789 import time
2790 assert main, "main not defined"
2791 assert utilities.assert_equals, "utilities.assert_equals not defined"
2792 assert main.kill, "main.kill not defined"
2793 nodeNames = [ node.name for node in main.kill ]
2794 main.step( "Upgrading" + str( nodeNames ) + " ONOS nodes" )
2795
2796 stopResults = main.TRUE
2797 uninstallResults = main.TRUE
2798 startResults = main.TRUE
2799 sshResults = main.TRUE
2800 isup = main.TRUE
2801 restartTime = time.time()
2802 for ctrl in main.kill:
2803 stopResults = stopResults and\
2804 ctrl.onosStop( ctrl.ipAddress )
2805 uninstallResults = uninstallResults and\
2806 ctrl.onosUninstall( ctrl.ipAddress )
2807 # Install the new version of onos
2808 startResults = startResults and\
2809 ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
2810 sshResults = sshResults and\
2811 ctrl.onosSecureSSH( node=ctrl.ipAddress )
2812 isup = isup and ctrl.isup( ctrl.ipAddress )
2813 utilities.assert_equals( expect=main.TRUE, actual=stopResults,
2814 onpass="ONOS nodes stopped successfully",
2815 onfail="ONOS nodes NOT successfully stopped" )
2816 utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
2817 onpass="ONOS nodes uninstalled successfully",
2818 onfail="ONOS nodes NOT successfully uninstalled" )
2819 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2820 onpass="ONOS nodes started successfully",
2821 onfail="ONOS nodes NOT successfully started" )
2822 utilities.assert_equals( expect=main.TRUE, actual=sshResults,
2823 onpass="Successfully secured onos ssh",
2824 onfail="Failed to secure onos ssh" )
2825 utilities.assert_equals( expect=main.TRUE, actual=isup,
2826 onpass="ONOS nodes fully started",
2827 onfail="ONOS nodes NOT fully started" )
2828
2829 main.step( "Restarting ONOS CLI" )
2830 cliResults = main.TRUE
2831 for ctrl in main.kill:
2832 cliResults = cliResults and\
2833 ctrl.startOnosCli( ctrl.ipAddress )
2834 ctrl.active = True
2835 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
2836 onpass="ONOS node(s) restarted",
2837 onfail="ONOS node(s) did not restart" )
2838
2839 # Grab the time of restart so we can have some idea of average time
2840 main.restartTime = time.time() - restartTime
2841 main.log.debug( "Restart time: " + str( main.restartTime ) )
2842 # TODO: Make this configurable.
2843 main.step( "Checking ONOS nodes" )
2844 nodeResults = utilities.retry( main.Cluster.nodesCheck,
2845 False,
2846 sleep=15,
2847 attempts=5 )
2848
2849 utilities.assert_equals( expect=True, actual=nodeResults,
2850 onpass="Nodes check successful",
2851 onfail="Nodes check NOT successful" )
2852
2853 if not nodeResults:
2854 for ctrl in main.Cluster.active():
2855 main.log.debug( "{} components not ACTIVE: \n{}".format(
2856 ctrl.name,
2857 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
2858 main.log.error( "Failed to start ONOS, stopping test" )
2859 main.cleanAndExit()
2860
2861 self.commonChecks()
2862
2863 main.step( "Rerun for election on the node(s) that were killed" )
2864 runResults = main.TRUE
2865 for ctrl in main.kill:
2866 runResults = runResults and\
2867 ctrl.electionTestRun()
2868 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2869 onpass="ONOS nodes reran for election topic",
2870 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002871
Devin Lim142b5342017-07-20 15:22:39 -07002872 def tempCell( self, cellName, ipList ):
2873 main.step( "Create cell file" )
2874 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002875
Devin Lim142b5342017-07-20 15:22:39 -07002876 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2877 main.Mininet1.ip_address,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002878 cellAppString, ipList, main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002879 main.step( "Applying cell variable to environment" )
2880 cellResult = main.ONOSbench.setCell( cellName )
2881 verifyResult = main.ONOSbench.verifyCell()
2882
Devin Lim142b5342017-07-20 15:22:39 -07002883 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002884 """
2885 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002886 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002887 1: scaling
2888 """
2889 """
2890 Check state after ONOS failure/scaling
2891 """
2892 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002893 assert main, "main not defined"
2894 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002895 main.case( "Running ONOS Constant State Tests" )
2896
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002897 OnosAfterWhich = [ "failure", "scaliing" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002898
Devin Lim58046fa2017-07-05 16:55:00 -07002899 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002900 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002901
Devin Lim142b5342017-07-20 15:22:39 -07002902 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002903 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002904
2905 if rolesResults and not consistentMastership:
2906 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002907 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002908 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002909 json.dumps( json.loads( ONOSMastership[ i ] ),
2910 sort_keys=True,
2911 indent=4,
2912 separators=( ',', ': ' ) ) )
2913
2914 if compareSwitch:
2915 description2 = "Compare switch roles from before failure"
2916 main.step( description2 )
2917 try:
2918 currentJson = json.loads( ONOSMastership[ 0 ] )
2919 oldJson = json.loads( mastershipState )
2920 except ( ValueError, TypeError ):
2921 main.log.exception( "Something is wrong with parsing " +
2922 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002923 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2924 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002925 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002926 mastershipCheck = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -08002927 for swName, swDetails in main.Mininet1.getSwitches().items():
2928 switchDPID = swDetails[ 'dpid' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002929 current = [ switch[ 'master' ] for switch in currentJson
2930 if switchDPID in switch[ 'id' ] ]
2931 old = [ switch[ 'master' ] for switch in oldJson
2932 if switchDPID in switch[ 'id' ] ]
2933 if current == old:
2934 mastershipCheck = mastershipCheck and main.TRUE
2935 else:
2936 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2937 mastershipCheck = main.FALSE
2938 utilities.assert_equals(
2939 expect=main.TRUE,
2940 actual=mastershipCheck,
2941 onpass="Mastership of Switches was not changed",
2942 onfail="Mastership of some switches changed" )
2943
2944 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002945 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002946 intentCheck = main.FALSE
2947 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002948
2949 main.step( "Check for consistency in Intents from each controller" )
2950 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2951 main.log.info( "Intents are consistent across all ONOS " +
2952 "nodes" )
2953 else:
2954 consistentIntents = False
2955
2956 # Try to make it easy to figure out what is happening
2957 #
2958 # Intent ONOS1 ONOS2 ...
2959 # 0x01 INSTALLED INSTALLING
2960 # ... ... ...
2961 # ... ... ...
2962 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002963 for ctrl in main.Cluster.active():
2964 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002965 main.log.warn( title )
2966 # get all intent keys in the cluster
2967 keys = []
2968 for nodeStr in ONOSIntents:
2969 node = json.loads( nodeStr )
2970 for intent in node:
2971 keys.append( intent.get( 'id' ) )
2972 keys = set( keys )
2973 for key in keys:
2974 row = "%-13s" % key
2975 for nodeStr in ONOSIntents:
2976 node = json.loads( nodeStr )
2977 for intent in node:
2978 if intent.get( 'id' ) == key:
2979 row += "%-15s" % intent.get( 'state' )
2980 main.log.warn( row )
2981 # End table view
2982
2983 utilities.assert_equals(
2984 expect=True,
2985 actual=consistentIntents,
2986 onpass="Intents are consistent across all ONOS nodes",
2987 onfail="ONOS nodes have different views of intents" )
2988 intentStates = []
2989 for node in ONOSIntents: # Iter through ONOS nodes
2990 nodeStates = []
2991 # Iter through intents of a node
2992 try:
2993 for intent in json.loads( node ):
2994 nodeStates.append( intent[ 'state' ] )
2995 except ( ValueError, TypeError ):
2996 main.log.exception( "Error in parsing intents" )
2997 main.log.error( repr( node ) )
2998 intentStates.append( nodeStates )
2999 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
3000 main.log.info( dict( out ) )
3001
3002 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07003003 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07003004 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07003005 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07003006 main.log.warn( json.dumps(
3007 json.loads( ONOSIntents[ i ] ),
3008 sort_keys=True,
3009 indent=4,
3010 separators=( ',', ': ' ) ) )
3011 elif intentsResults and consistentIntents:
3012 intentCheck = main.TRUE
3013
3014 # NOTE: Store has no durability, so intents are lost across system
3015 # restarts
3016 if not isRestart:
3017 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
3018 # NOTE: this requires case 5 to pass for intentState to be set.
3019 # maybe we should stop the test if that fails?
3020 sameIntents = main.FALSE
3021 try:
3022 intentState
3023 except NameError:
3024 main.log.warn( "No previous intent state was saved" )
3025 else:
3026 if intentState and intentState == ONOSIntents[ 0 ]:
3027 sameIntents = main.TRUE
3028 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
3029 # TODO: possibly the states have changed? we may need to figure out
3030 # what the acceptable states are
3031 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
3032 sameIntents = main.TRUE
3033 try:
3034 before = json.loads( intentState )
3035 after = json.loads( ONOSIntents[ 0 ] )
3036 for intent in before:
3037 if intent not in after:
3038 sameIntents = main.FALSE
3039 main.log.debug( "Intent is not currently in ONOS " +
3040 "(at least in the same form):" )
3041 main.log.debug( json.dumps( intent ) )
3042 except ( ValueError, TypeError ):
3043 main.log.exception( "Exception printing intents" )
3044 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3045 main.log.debug( repr( intentState ) )
3046 if sameIntents == main.FALSE:
3047 try:
3048 main.log.debug( "ONOS intents before: " )
3049 main.log.debug( json.dumps( json.loads( intentState ),
3050 sort_keys=True, indent=4,
3051 separators=( ',', ': ' ) ) )
3052 main.log.debug( "Current ONOS intents: " )
3053 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
3054 sort_keys=True, indent=4,
3055 separators=( ',', ': ' ) ) )
3056 except ( ValueError, TypeError ):
3057 main.log.exception( "Exception printing intents" )
3058 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3059 main.log.debug( repr( intentState ) )
3060 utilities.assert_equals(
3061 expect=main.TRUE,
3062 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003063 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07003064 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
3065 intentCheck = intentCheck and sameIntents
3066
3067 main.step( "Get the OF Table entries and compare to before " +
3068 "component " + OnosAfterWhich[ afterWhich ] )
3069 FlowTables = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -08003070 for switch in main.Mininet1.getSwitches().keys():
3071 main.log.info( "Checking flow table on " + switch )
3072 tmpFlows = main.Mininet1.getFlowTable( switch, version="1.3", debug=False )
3073 curSwitch = main.Mininet1.flowTableComp( flows[ switch ], tmpFlows )
Devin Lim58046fa2017-07-05 16:55:00 -07003074 FlowTables = FlowTables and curSwitch
3075 if curSwitch == main.FALSE:
Jon Hallab611372018-02-21 15:26:05 -08003076 main.log.warn( "Differences in flow table for switch: {}".format( switch ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003077 utilities.assert_equals(
3078 expect=main.TRUE,
3079 actual=FlowTables,
3080 onpass="No changes were found in the flow tables",
3081 onfail="Changes were found in the flow tables" )
3082
Jon Hallca319892017-06-15 15:25:22 -07003083 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003084 """
3085 main.step( "Check the continuous pings to ensure that no packets " +
3086 "were dropped during component failure" )
3087 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3088 main.params[ 'TESTONIP' ] )
3089 LossInPings = main.FALSE
3090 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3091 for i in range( 8, 18 ):
3092 main.log.info(
3093 "Checking for a loss in pings along flow from s" +
3094 str( i ) )
3095 LossInPings = main.Mininet2.checkForLoss(
3096 "/tmp/ping.h" +
3097 str( i ) ) or LossInPings
3098 if LossInPings == main.TRUE:
3099 main.log.info( "Loss in ping detected" )
3100 elif LossInPings == main.ERROR:
3101 main.log.info( "There are multiple mininet process running" )
3102 elif LossInPings == main.FALSE:
3103 main.log.info( "No Loss in the pings" )
3104 main.log.info( "No loss of dataplane connectivity" )
3105 utilities.assert_equals(
3106 expect=main.FALSE,
3107 actual=LossInPings,
3108 onpass="No Loss of connectivity",
3109 onfail="Loss of dataplane connectivity detected" )
3110 # NOTE: Since intents are not persisted with IntnentStore,
3111 # we expect loss in dataplane connectivity
3112 LossInPings = main.FALSE
3113 """
Devin Lim58046fa2017-07-05 16:55:00 -07003114 def compareTopo( self, main ):
3115 """
3116 Compare topo
3117 """
3118 import json
3119 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003120 assert main, "main not defined"
3121 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003122 try:
3123 from tests.dependencies.topology import Topology
3124 except ImportError:
3125 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003126 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003127 try:
3128 main.topoRelated
3129 except ( NameError, AttributeError ):
3130 main.topoRelated = Topology()
3131 main.case( "Compare ONOS Topology view to Mininet topology" )
3132 main.caseExplanation = "Compare topology objects between Mininet" +\
3133 " and ONOS"
3134 topoResult = main.FALSE
3135 topoFailMsg = "ONOS topology don't match Mininet"
3136 elapsed = 0
3137 count = 0
3138 main.step( "Comparing ONOS topology to MN topology" )
3139 startTime = time.time()
3140 # Give time for Gossip to work
3141 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3142 devicesResults = main.TRUE
3143 linksResults = main.TRUE
3144 hostsResults = main.TRUE
3145 hostAttachmentResults = True
3146 count += 1
3147 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003148 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003149 kwargs={ 'sleep': 5, 'attempts': 5,
3150 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003151 ipResult = main.TRUE
3152
Devin Lim142b5342017-07-20 15:22:39 -07003153 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003154 kwargs={ 'sleep': 5, 'attempts': 5,
3155 'randomTime': True },
3156 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003157
3158 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003159 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003160 if hosts[ controller ]:
3161 for host in hosts[ controller ]:
3162 if host is None or host.get( 'ipAddresses', [] ) == []:
3163 main.log.error(
3164 "Error with host ipAddresses on controller" +
3165 controllerStr + ": " + str( host ) )
3166 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003167 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003168 kwargs={ 'sleep': 5, 'attempts': 5,
3169 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003170 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003171 kwargs={ 'sleep': 5, 'attempts': 5,
3172 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003173 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003174 kwargs={ 'sleep': 5, 'attempts': 5,
3175 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003176
3177 elapsed = time.time() - startTime
3178 cliTime = time.time() - cliStart
Jon Hall5d5876e2017-11-30 09:33:16 -08003179 main.log.debug( "Elapsed time: " + str( elapsed ) )
3180 main.log.debug( "CLI time: " + str( cliTime ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003181
3182 if all( e is None for e in devices ) and\
3183 all( e is None for e in hosts ) and\
3184 all( e is None for e in ports ) and\
3185 all( e is None for e in links ) and\
3186 all( e is None for e in clusters ):
3187 topoFailMsg = "Could not get topology from ONOS"
3188 main.log.error( topoFailMsg )
3189 continue # Try again, No use trying to compare
3190
3191 mnSwitches = main.Mininet1.getSwitches()
3192 mnLinks = main.Mininet1.getLinks()
3193 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003194 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003195 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003196 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3197 controller,
3198 mnSwitches,
3199 devices,
3200 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003201 utilities.assert_equals( expect=main.TRUE,
3202 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003203 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003204 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003205 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003206 " Switches view is incorrect" )
3207
Devin Lim58046fa2017-07-05 16:55:00 -07003208 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003209 main.Mininet1.compareLinks,
3210 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003211 utilities.assert_equals( expect=main.TRUE,
3212 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003213 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003214 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003215 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003216 " links view is incorrect" )
3217 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3218 currentHostsResult = main.Mininet1.compareHosts(
3219 mnHosts,
3220 hosts[ controller ] )
3221 elif hosts[ controller ] == []:
3222 currentHostsResult = main.TRUE
3223 else:
3224 currentHostsResult = main.FALSE
3225 utilities.assert_equals( expect=main.TRUE,
3226 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003227 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003228 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003229 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003230 " hosts don't match Mininet" )
Devin Lim58046fa2017-07-05 16:55:00 -07003231 hostAttachment = True
Jon Hallab611372018-02-21 15:26:05 -08003232 if main.topoMappings:
3233 ctrl = main.Cluster.next()
3234 # CHECKING HOST ATTACHMENT POINTS
3235 zeroHosts = False
3236 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3237 if hosts[ controller ] == []:
3238 main.log.warn( "There are no hosts discovered" )
3239 zeroHosts = True
3240 else:
3241 for host in hosts[ controller ]:
3242 mac = None
3243 locations = []
3244 device = None
3245 port = None
3246 try:
3247 mac = host.get( 'mac' )
3248 assert mac, "mac field could not be found for this host object"
3249 if 'locations' in host:
3250 locations = host.get( 'locations' )
3251 elif 'location' in host:
3252 locations.append( host.get( 'location' ) )
3253 assert locations, "locations field could not be found for this host object"
Devin Lim58046fa2017-07-05 16:55:00 -07003254
Jon Hallab611372018-02-21 15:26:05 -08003255 # Trim the protocol identifier off deviceId
3256 device = str( locations[0].get( 'elementId' ) ).split( ':' )[ 1 ]
3257 assert device, "elementId field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003258
Jon Hallab611372018-02-21 15:26:05 -08003259 port = locations[0].get( 'port' )
3260 assert port, "port field could not be found for this host location object"
3261 main.log.debug( "Host: {}\nmac: {}\n location(s): {}\ndevice: {}\n port: {}".format(
3262 ctrl.pprint( host ), mac, ctrl.pprint( locations ), device, port ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003263
Jon Hallab611372018-02-21 15:26:05 -08003264 # Now check if this matches where they should be
3265 if mac and device and port:
3266 if str( port ) != "1":
3267 main.log.error( "The attachment port is incorrect for " +
3268 "host " + str( mac ) +
3269 ". Expected: 1 Actual: " + str( port ) )
3270 hostAttachment = False
3271 if device != main.topoMappings[ str( mac ) ]:
3272 main.log.error( "The attachment device is incorrect for " +
3273 "host " + str( mac ) +
3274 ". Expected: " + main.topoMppings[ str( mac ) ] +
3275 " Actual: " + device )
3276 hostAttachment = False
3277 else:
Devin Lim58046fa2017-07-05 16:55:00 -07003278 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003279 except ( AssertionError, TypeError ):
3280 main.log.exception( "Json object not as expected" )
3281 main.log.error( repr( host ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003282 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003283 else:
3284 main.log.error( "No hosts json output or \"Error\"" +
3285 " in output. hosts = " +
3286 repr( hosts[ controller ] ) )
3287 if zeroHosts is False:
3288 # TODO: Find a way to know if there should be hosts in a
3289 # given point of the test
3290 hostAttachment = True
Devin Lim58046fa2017-07-05 16:55:00 -07003291
Jon Hallab611372018-02-21 15:26:05 -08003292 # END CHECKING HOST ATTACHMENT POINTS
Devin Lim58046fa2017-07-05 16:55:00 -07003293 devicesResults = devicesResults and currentDevicesResult
3294 linksResults = linksResults and currentLinksResult
3295 hostsResults = hostsResults and currentHostsResult
3296 hostAttachmentResults = hostAttachmentResults and\
3297 hostAttachment
3298 topoResult = ( devicesResults and linksResults
3299 and hostsResults and ipResult and
3300 hostAttachmentResults )
3301 utilities.assert_equals( expect=True,
3302 actual=topoResult,
3303 onpass="ONOS topology matches Mininet",
3304 onfail=topoFailMsg )
3305 # End of While loop to pull ONOS state
3306
3307 # Compare json objects for hosts and dataplane clusters
3308
3309 # hosts
3310 main.step( "Hosts view is consistent across all ONOS nodes" )
3311 consistentHostsResult = main.TRUE
3312 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003313 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003314 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3315 if hosts[ controller ] == hosts[ 0 ]:
3316 continue
3317 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003318 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003319 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003320 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003321 consistentHostsResult = main.FALSE
3322
3323 else:
Jon Hallca319892017-06-15 15:25:22 -07003324 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003325 controllerStr )
3326 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003327 main.log.debug( controllerStr +
3328 " hosts response: " +
3329 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003330 utilities.assert_equals(
3331 expect=main.TRUE,
3332 actual=consistentHostsResult,
3333 onpass="Hosts view is consistent across all ONOS nodes",
3334 onfail="ONOS nodes have different views of hosts" )
3335
3336 main.step( "Hosts information is correct" )
3337 hostsResults = hostsResults and ipResult
3338 utilities.assert_equals(
3339 expect=main.TRUE,
3340 actual=hostsResults,
3341 onpass="Host information is correct",
3342 onfail="Host information is incorrect" )
3343
3344 main.step( "Host attachment points to the network" )
3345 utilities.assert_equals(
3346 expect=True,
3347 actual=hostAttachmentResults,
3348 onpass="Hosts are correctly attached to the network",
3349 onfail="ONOS did not correctly attach hosts to the network" )
3350
3351 # Strongly connected clusters of devices
3352 main.step( "Clusters view is consistent across all ONOS nodes" )
3353 consistentClustersResult = main.TRUE
3354 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003355 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003356 if "Error" not in clusters[ controller ]:
3357 if clusters[ controller ] == clusters[ 0 ]:
3358 continue
3359 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003360 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003361 controllerStr +
3362 " is inconsistent with ONOS1" )
3363 consistentClustersResult = main.FALSE
3364 else:
3365 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003366 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003367 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003368 main.log.debug( controllerStr +
3369 " clusters response: " +
3370 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003371 utilities.assert_equals(
3372 expect=main.TRUE,
3373 actual=consistentClustersResult,
3374 onpass="Clusters view is consistent across all ONOS nodes",
3375 onfail="ONOS nodes have different views of clusters" )
3376 if not consistentClustersResult:
3377 main.log.debug( clusters )
3378 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003379 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003380
3381 main.step( "There is only one SCC" )
3382 # there should always only be one cluster
3383 try:
3384 numClusters = len( json.loads( clusters[ 0 ] ) )
3385 except ( ValueError, TypeError ):
3386 main.log.exception( "Error parsing clusters[0]: " +
3387 repr( clusters[ 0 ] ) )
3388 numClusters = "ERROR"
3389 clusterResults = main.FALSE
3390 if numClusters == 1:
3391 clusterResults = main.TRUE
3392 utilities.assert_equals(
3393 expect=1,
3394 actual=numClusters,
3395 onpass="ONOS shows 1 SCC",
3396 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3397
3398 topoResult = ( devicesResults and linksResults
3399 and hostsResults and consistentHostsResult
3400 and consistentClustersResult and clusterResults
3401 and ipResult and hostAttachmentResults )
3402
3403 topoResult = topoResult and int( count <= 2 )
3404 note = "note it takes about " + str( int( cliTime ) ) + \
3405 " seconds for the test to make all the cli calls to fetch " +\
3406 "the topology from each ONOS instance"
3407 main.log.info(
3408 "Very crass estimate for topology discovery/convergence( " +
3409 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3410 str( count ) + " tries" )
3411
3412 main.step( "Device information is correct" )
3413 utilities.assert_equals(
3414 expect=main.TRUE,
3415 actual=devicesResults,
3416 onpass="Device information is correct",
3417 onfail="Device information is incorrect" )
3418
3419 main.step( "Links are correct" )
3420 utilities.assert_equals(
3421 expect=main.TRUE,
3422 actual=linksResults,
3423 onpass="Link are correct",
3424 onfail="Links are incorrect" )
3425
3426 main.step( "Hosts are correct" )
3427 utilities.assert_equals(
3428 expect=main.TRUE,
3429 actual=hostsResults,
3430 onpass="Hosts are correct",
3431 onfail="Hosts are incorrect" )
3432
3433 # FIXME: move this to an ONOS state case
3434 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003435 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003436 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003437 attempts=5 )
3438 utilities.assert_equals( expect=True, actual=nodeResults,
3439 onpass="Nodes check successful",
3440 onfail="Nodes check NOT successful" )
3441 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003442 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003443 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003444 ctrl.name,
3445 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003446
3447 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003448 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003449
Jon Hallab611372018-02-21 15:26:05 -08003450 def linkDown( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003451 """
Jon Hallab611372018-02-21 15:26:05 -08003452 Link src-dst down
Devin Lim58046fa2017-07-05 16:55:00 -07003453 """
3454 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003455 assert main, "main not defined"
3456 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003457 # NOTE: You should probably run a topology check after this
3458
3459 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3460
3461 description = "Turn off a link to ensure that Link Discovery " +\
3462 "is working properly"
3463 main.case( description )
3464
Jon Hallab611372018-02-21 15:26:05 -08003465 main.step( "Kill Link between " + src + " and " + dst )
3466 LinkDown = main.Mininet1.link( END1=src, END2=dst, OPTION="down" )
Devin Lim58046fa2017-07-05 16:55:00 -07003467 main.log.info( "Waiting " + str( linkSleep ) +
3468 " seconds for link down to be discovered" )
3469 time.sleep( linkSleep )
3470 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3471 onpass="Link down successful",
3472 onfail="Failed to bring link down" )
3473 # TODO do some sort of check here
3474
Jon Hallab611372018-02-21 15:26:05 -08003475 def linkUp( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003476 """
Jon Hallab611372018-02-21 15:26:05 -08003477 Link src-dst up
Devin Lim58046fa2017-07-05 16:55:00 -07003478 """
3479 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003480 assert main, "main not defined"
3481 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003482 # NOTE: You should probably run a topology check after this
3483
3484 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3485
3486 description = "Restore a link to ensure that Link Discovery is " + \
3487 "working properly"
3488 main.case( description )
3489
Jon Hallab611372018-02-21 15:26:05 -08003490 main.step( "Bring link between " + src + " and " + dst + " back up" )
3491 LinkUp = main.Mininet1.link( END1=src, END2=dst, OPTION="up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003492 main.log.info( "Waiting " + str( linkSleep ) +
3493 " seconds for link up to be discovered" )
3494 time.sleep( linkSleep )
3495 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3496 onpass="Link up successful",
3497 onfail="Failed to bring link up" )
3498
3499 def switchDown( self, main ):
3500 """
3501 Switch Down
3502 """
3503 # NOTE: You should probably run a topology check after this
3504 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003505 assert main, "main not defined"
3506 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003507
3508 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3509
3510 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003511 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003512 main.case( description )
3513 switch = main.params[ 'kill' ][ 'switch' ]
3514 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3515
3516 # TODO: Make this switch parameterizable
3517 main.step( "Kill " + switch )
3518 main.log.info( "Deleting " + switch )
3519 main.Mininet1.delSwitch( switch )
3520 main.log.info( "Waiting " + str( switchSleep ) +
3521 " seconds for switch down to be discovered" )
3522 time.sleep( switchSleep )
3523 device = onosCli.getDevice( dpid=switchDPID )
3524 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003525 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003526 result = main.FALSE
3527 if device and device[ 'available' ] is False:
3528 result = main.TRUE
3529 utilities.assert_equals( expect=main.TRUE, actual=result,
3530 onpass="Kill switch successful",
3531 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003532
Devin Lim58046fa2017-07-05 16:55:00 -07003533 def switchUp( self, main ):
3534 """
3535 Switch Up
3536 """
3537 # NOTE: You should probably run a topology check after this
3538 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003539 assert main, "main not defined"
3540 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003541
3542 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3543 switch = main.params[ 'kill' ][ 'switch' ]
3544 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3545 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003546 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003547 description = "Adding a switch to ensure it is discovered correctly"
3548 main.case( description )
3549
3550 main.step( "Add back " + switch )
3551 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3552 for peer in links:
3553 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003554 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003555 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3556 main.log.info( "Waiting " + str( switchSleep ) +
3557 " seconds for switch up to be discovered" )
3558 time.sleep( switchSleep )
3559 device = onosCli.getDevice( dpid=switchDPID )
3560 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003561 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003562 result = main.FALSE
3563 if device and device[ 'available' ]:
3564 result = main.TRUE
3565 utilities.assert_equals( expect=main.TRUE, actual=result,
3566 onpass="add switch successful",
3567 onfail="Failed to add switch?" )
3568
3569 def startElectionApp( self, main ):
3570 """
3571 start election app on all onos nodes
3572 """
Devin Lim58046fa2017-07-05 16:55:00 -07003573 assert main, "main not defined"
3574 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003575
3576 main.case( "Start Leadership Election app" )
3577 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003578 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003579 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003580 utilities.assert_equals(
3581 expect=main.TRUE,
3582 actual=appResult,
3583 onpass="Election app installed",
3584 onfail="Something went wrong with installing Leadership election" )
3585
3586 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003587 onosCli.electionTestRun()
3588 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003589 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003590 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003591 utilities.assert_equals(
3592 expect=True,
3593 actual=sameResult,
3594 onpass="All nodes see the same leaderboards",
3595 onfail="Inconsistent leaderboards" )
3596
3597 if sameResult:
Jon Hall5d5876e2017-11-30 09:33:16 -08003598 # Check that the leader is one of the active nodes
3599 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003600 leader = leaders[ 0 ][ 0 ]
Jon Hall5d5876e2017-11-30 09:33:16 -08003601 if leader in ips:
3602 legitimate = True
Devin Lim58046fa2017-07-05 16:55:00 -07003603 else:
Jon Hall5d5876e2017-11-30 09:33:16 -08003604 legitimate = False
3605 main.log.debug( leaders )
3606 main.step( "Active node was elected leader?" )
Devin Lim58046fa2017-07-05 16:55:00 -07003607 utilities.assert_equals(
3608 expect=True,
Jon Hall5d5876e2017-11-30 09:33:16 -08003609 actual=legitimate,
Devin Lim58046fa2017-07-05 16:55:00 -07003610 onpass="Correct leader was elected",
3611 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003612 main.Cluster.testLeader = leader
3613
Devin Lim58046fa2017-07-05 16:55:00 -07003614 def isElectionFunctional( self, main ):
3615 """
3616 Check that Leadership Election is still functional
3617 15.1 Run election on each node
3618 15.2 Check that each node has the same leaders and candidates
3619 15.3 Find current leader and withdraw
3620 15.4 Check that a new node was elected leader
3621 15.5 Check that that new leader was the candidate of old leader
3622 15.6 Run for election on old leader
3623 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3624 15.8 Make sure that the old leader was added to the candidate list
3625
3626 old and new variable prefixes refer to data from before vs after
3627 withdrawl and later before withdrawl vs after re-election
3628 """
3629 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003630 assert main, "main not defined"
3631 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003632
3633 description = "Check that Leadership Election is still functional"
3634 main.case( description )
3635 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3636
3637 oldLeaders = [] # list of lists of each nodes' candidates before
3638 newLeaders = [] # list of lists of each nodes' candidates after
3639 oldLeader = '' # the old leader from oldLeaders, None if not same
3640 newLeader = '' # the new leaders fron newLoeaders, None if not same
3641 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3642 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003643 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003644 expectNoLeader = True
3645
3646 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003647 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003648 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003649 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003650 actual=electionResult,
3651 onpass="All nodes successfully ran for leadership",
3652 onfail="At least one node failed to run for leadership" )
3653
3654 if electionResult == main.FALSE:
3655 main.log.error(
3656 "Skipping Test Case because Election Test App isn't loaded" )
3657 main.skipCase()
3658
3659 main.step( "Check that each node shows the same leader and candidates" )
3660 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003661 activeCLIs = main.Cluster.active()
3662 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003663 if sameResult:
3664 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003665 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003666 else:
3667 oldLeader = None
3668 utilities.assert_equals(
3669 expect=True,
3670 actual=sameResult,
3671 onpass="Leaderboards are consistent for the election topic",
3672 onfail=failMessage )
3673
3674 main.step( "Find current leader and withdraw" )
3675 withdrawResult = main.TRUE
3676 # do some sanity checking on leader before using it
3677 if oldLeader is None:
3678 main.log.error( "Leadership isn't consistent." )
3679 withdrawResult = main.FALSE
3680 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003681 for ctrl in main.Cluster.active():
3682 if oldLeader == ctrl.ipAddress:
3683 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003684 break
3685 else: # FOR/ELSE statement
3686 main.log.error( "Leader election, could not find current leader" )
3687 if oldLeader:
3688 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3689 utilities.assert_equals(
3690 expect=main.TRUE,
3691 actual=withdrawResult,
3692 onpass="Node was withdrawn from election",
3693 onfail="Node was not withdrawn from election" )
3694
3695 main.step( "Check that a new node was elected leader" )
3696 failMessage = "Nodes have different leaders"
3697 # Get new leaders and candidates
3698 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3699 newLeader = None
3700 if newLeaderResult:
3701 if newLeaders[ 0 ][ 0 ] == 'none':
3702 main.log.error( "No leader was elected on at least 1 node" )
3703 if not expectNoLeader:
3704 newLeaderResult = False
3705 newLeader = newLeaders[ 0 ][ 0 ]
3706
3707 # Check that the new leader is not the older leader, which was withdrawn
3708 if newLeader == oldLeader:
3709 newLeaderResult = False
3710 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3711 " as the current leader" )
3712 utilities.assert_equals(
3713 expect=True,
3714 actual=newLeaderResult,
3715 onpass="Leadership election passed",
3716 onfail="Something went wrong with Leadership election" )
3717
3718 main.step( "Check that that new leader was the candidate of old leader" )
3719 # candidates[ 2 ] should become the top candidate after withdrawl
3720 correctCandidateResult = main.TRUE
3721 if expectNoLeader:
3722 if newLeader == 'none':
3723 main.log.info( "No leader expected. None found. Pass" )
3724 correctCandidateResult = main.TRUE
3725 else:
3726 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3727 correctCandidateResult = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07003728 utilities.assert_equals(
3729 expect=main.TRUE,
3730 actual=correctCandidateResult,
3731 onpass="Correct Candidate Elected",
3732 onfail="Incorrect Candidate Elected" )
3733
3734 main.step( "Run for election on old leader( just so everyone " +
3735 "is in the hat )" )
3736 if oldLeaderCLI is not None:
3737 runResult = oldLeaderCLI.electionTestRun()
3738 else:
3739 main.log.error( "No old leader to re-elect" )
3740 runResult = main.FALSE
3741 utilities.assert_equals(
3742 expect=main.TRUE,
3743 actual=runResult,
3744 onpass="App re-ran for election",
3745 onfail="App failed to run for election" )
3746
3747 main.step(
3748 "Check that oldLeader is a candidate, and leader if only 1 node" )
3749 # verify leader didn't just change
3750 # Get new leaders and candidates
3751 reRunLeaders = []
3752 time.sleep( 5 ) # Paremterize
3753 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3754
Devin Lim58046fa2017-07-05 16:55:00 -07003755 def installDistributedPrimitiveApp( self, main ):
Jon Hall5d5876e2017-11-30 09:33:16 -08003756 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003757 Install Distributed Primitives app
Jon Hall5d5876e2017-11-30 09:33:16 -08003758 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003759 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003760 assert main, "main not defined"
3761 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003762
3763 # Variables for the distributed primitives tests
3764 main.pCounterName = "TestON-Partitions"
3765 main.pCounterValue = 0
3766 main.onosSet = set( [] )
3767 main.onosSetName = "TestON-set"
3768
3769 description = "Install Primitives app"
3770 main.case( description )
3771 main.step( "Install Primitives app" )
3772 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003773 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003774 utilities.assert_equals( expect=main.TRUE,
3775 actual=appResults,
3776 onpass="Primitives app activated",
3777 onfail="Primitives app not activated" )
3778 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003779 time.sleep( 5 ) # To allow all nodes to activate
Jon Halla478b852017-12-04 15:00:15 -08003780
3781 def upgradeInit( self, main ):
3782 '''
3783 Initiates an update
3784 '''
3785 main.step( "Send the command to initialize the upgrade" )
3786 ctrl = main.Cluster.next().CLI
3787 initialized = ctrl.issuInit()
3788 utilities.assert_equals( expect=main.TRUE, actual=initialized,
3789 onpass="ISSU initialized",
3790 onfail="Error initializing the upgrade" )
3791
3792 main.step( "Check the status of the upgrade" )
3793 ctrl = main.Cluster.next().CLI
3794 status = ctrl.issu()
3795 main.log.debug( status )
3796 # TODO: check things here?
3797
3798 main.step( "Checking ONOS nodes" )
3799 nodeResults = utilities.retry( main.Cluster.nodesCheck,
3800 False,
3801 sleep=15,
3802 attempts=5 )
3803 utilities.assert_equals( expect=True, actual=nodeResults,
3804 onpass="Nodes check successful",
3805 onfail="Nodes check NOT successful" )
Jon Hall7ce46ea2018-02-05 12:20:59 -08003806
3807 def backupData( self, main, location ):
3808 """
3809 Backs up ONOS data and logs to a given location on each active node in a cluster
3810 """
3811 result = True
3812 for ctrl in main.Cluster.active():
3813 try:
3814 ctrl.server.handle.sendline( "rm " + location )
3815 ctrl.server.handle.expect( ctrl.server.prompt )
3816 main.log.debug( ctrl.server.handle.before + ctrl.server.handle.after )
3817 except pexpect.ExceptionPexpect as e:
3818 main.log.error( e )
3819 main.cleanAndExit()
3820 ctrl.CLI.log( "'Starting backup of onos data'", level="INFO" )
3821 result = result and ( ctrl.server.backupData( location ) is main.TRUE )
3822 ctrl.CLI.log( "'End of backup of onos data'", level="INFO" )
3823 return result
3824
3825 def restoreData( self, main, location ):
3826 """
3827 Restores ONOS data and logs from a given location on each node in a cluster
3828 """
3829 result = True
3830 for ctrl in main.Cluster.controllers:
3831 result = result and ( ctrl.server.restoreData( location ) is main.TRUE )
3832 return result
Jon Hallab611372018-02-21 15:26:05 -08003833
3834 def startTopology( self, main ):
3835 """
3836 Starts Mininet using a topology file after pushing a network config file to ONOS.
3837 """
3838 import json
3839 import time
3840 main.case( "Starting Mininet Topology" )
3841
3842 main.step( "Pushing Network config" )
3843 ctrl = main.Cluster.next()
3844 cfgPath = main.testsRoot + main.params[ 'topology' ][ 'configPath' ]
3845 cfgResult = ctrl.onosNetCfg( ctrl.ipAddress,
3846 path=cfgPath,
3847 fileName=main.params[ 'topology' ][ 'configName' ] )
3848 utilities.assert_equals( expect=main.TRUE, actual=cfgResult,
3849 onpass="Pushed Network Configuration to ONOS",
3850 onfail="Failed to push Network Configuration to ONOS" )
3851
3852 main.step( "Check Network config" )
3853 try:
3854 cfgFile = cfgPath + main.params[ 'topology' ][ 'configName' ]
3855 with open( cfgFile, 'r' ) as contents:
3856 pushedNetCfg = json.load( contents )
3857 pushedNetCfg = json.loads( json.dumps( pushedNetCfg ).lower() )
3858 except IOError:
3859 main.log.exception( "Net Cfg file not found." )
3860 main.cleanAndExit()
3861 netCfgSleep = int( main.params[ 'timers' ][ 'NetCfg' ] )
3862 time.sleep( netCfgSleep )
3863 rawONOSNetCfg = utilities.retry( f=main.Cluster.next().REST.getNetCfg,
3864 retValue=False,
3865 attempts=5,
3866 sleep=netCfgSleep )
3867 # Fix differences between ONOS printing and Pushed Cfg
3868 onosNetCfg = json.loads( rawONOSNetCfg.lower() )
3869
3870 # Compare pushed device config
3871 cfgResult = True
3872 for did, pushedDevice in pushedNetCfg[ 'devices' ].items():
3873 onosDevice = onosNetCfg[ 'devices' ].get( did )
3874 if pushedDevice != onosDevice:
3875 cfgResult = False
3876 main.log.error( "Pushed Network configuration does not match what is in " +
3877 "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedDevice ),
3878 ctrl.pprint( onosDevice ) ) )
3879
3880 # Compare pushed port config
3881 for portURI, pushedInterface in pushedNetCfg[ 'ports' ].items():
3882 onosInterface = onosNetCfg[ 'ports' ].get( portURI )
3883 # NOTE: pushed Cfg doesn't have macs
3884 for i in xrange( 0, len( pushedInterface[ 'interfaces' ] ) ):
3885 keys = pushedInterface[ 'interfaces' ][ i ].keys()
3886 portCompare = True
3887 for key in keys:
3888 if pushedInterface[ 'interfaces' ][ i ].get( key ) != onosInterface[ 'interfaces' ][ i ].get( key ) :
3889 main.log.debug( "{} mismatch for port {}".format( key, portURI ) )
3890 portCompare = False
3891 if not portCompare:
3892 cfgResult = False
3893 main.log.error( "Pushed Network configuration does not match what is in " +
3894 "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedInterface ),
3895 ctrl.pprint( onosInterface ) ) )
3896
Jon Hall9677ed32018-04-24 11:16:23 -07003897 if pushedNetCfg.get( 'hosts' ) is not None:
3898 # Compare pushed host config
3899 for hid, pushedHost in pushedNetCfg[ 'hosts' ].items():
3900 onosHost = onosNetCfg[ 'hosts' ].get( hid.lower() )
3901 if pushedHost != onosHost:
3902 cfgResult = False
3903 main.log.error( "Pushed Network configuration does not match what is in " +
3904 "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost),
3905 ctrl.pprint( onosHost ) ) )
Jon Hallab611372018-02-21 15:26:05 -08003906 utilities.assert_equals( expect=True,
3907 actual=cfgResult,
3908 onpass="Net Cfg set",
3909 onfail="Net Cfg not correctly set" )
3910 if not cfgResult:
3911 main.log.debug( "Pushed Network Config:" + ctrl.pprint( pushedNetCfg ) )
3912 main.log.debug( "ONOS Network Config:" + ctrl.pprint( onosNetCfg ) )
3913
3914 main.step( "Start Mininet topology" )
3915 for f in main.params[ 'topology' ][ 'files' ].values():
3916 main.ONOSbench.scp( main.Mininet1,
3917 f,
3918 main.Mininet1.home,
3919 direction="to" )
3920 topoName = main.params[ 'topology' ][ 'topoFile' ]
3921 topo = main.Mininet1.home + topoName
3922 ctrlList = ''
3923 for ctrl in main.Cluster.controllers:
3924 ctrlList += str( ctrl.ipAddress ) + ","
3925 args = main.params[ 'topology' ][ 'args' ]
3926 startResult = main.Mininet1.startNet( topoFile=topo,
3927 args=" --onos-ip=" + ctrlList + " " + args )
3928 utilities.assert_equals( expect=main.TRUE, actual=startResult,
3929 onpass="Mininet Started",
3930 onfail="Failed to start Mininet" )
3931 # Give SR app time to configure the network
3932 time.sleep( int( main.params[ 'timers' ][ 'SRSetup' ] ) )