blob: 262dbaf089f17e25668e7ffeecd40c95ccc77cb5 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halla478b852017-12-04 15:00:15 -080023import pexpect
24import re
Jon Halle1a3b752015-07-22 13:02:46 -070025
Jon Hallf37d44d2017-05-24 10:37:30 -070026
Jon Hall41d39f12016-04-11 22:54:35 -070027class HA():
Jon Hall57b50432015-10-22 10:20:10 -070028
Jon Halla440e872016-03-31 15:15:50 -070029 def __init__( self ):
30 self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070031
Devin Lim58046fa2017-07-05 16:55:00 -070032 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070033 # copy gen-partions file to ONOS
34 # NOTE: this assumes TestON and ONOS are on the same machine
35 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
36 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
37 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
38 main.ONOSbench.ip_address,
39 srcFile,
40 dstDir,
41 pwd=main.ONOSbench.pwd,
42 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070043
    def cleanUpGenPartition( self ):
        # Restore the stock onos-gen-partitions file (overwritten by
        # customizeOnosGenPartitions) via "git checkout" on the bench.
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            # A shell that never returns to the prompt means the bench is in
            # an unknown state, so the whole test is aborted.
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070057
Devin Lim58046fa2017-07-05 16:55:00 -070058 def startingMininet( self ):
59 main.step( "Starting Mininet" )
60 # scp topo file to mininet
61 # TODO: move to params?
62 topoName = "obelisk.py"
63 filePath = main.ONOSbench.home + "/tools/test/topos/"
64 main.ONOSbench.scp( main.Mininet1,
65 filePath + topoName,
66 main.Mininet1.home,
67 direction="to" )
68 mnResult = main.Mininet1.startNet()
69 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
70 onpass="Mininet Started",
71 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070072
Devin Lim58046fa2017-07-05 16:55:00 -070073 def scalingMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070074 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070075 main.scaling = main.params[ 'scaling' ].split( "," )
76 main.log.debug( main.scaling )
77 scale = main.scaling.pop( 0 )
78 main.log.debug( scale )
79 if "e" in scale:
80 equal = True
81 else:
82 equal = False
83 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070084 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
85 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070086 utilities.assert_equals( expect=main.TRUE, actual=genResult,
87 onpass="New cluster metadata file generated",
88 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070089
Devin Lim58046fa2017-07-05 16:55:00 -070090 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070091 main.step( "Generate initial metadata file" )
92 if main.Cluster.numCtrls >= 5:
93 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070094 else:
95 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070096 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070097 utilities.assert_equals( expect=main.TRUE, actual=genResult,
98 onpass="New cluster metadata file generated",
99 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700100
Devin Lim142b5342017-07-20 15:22:39 -0700101 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700102 import os
103 main.step( "Setup server for cluster metadata file" )
104 main.serverPort = main.params[ 'server' ][ 'port' ]
105 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
106 main.log.debug( "Root dir: {}".format( rootDir ) )
107 status = main.Server.start( main.ONOSbench,
108 rootDir,
109 port=main.serverPort,
110 logDir=main.logdir + "/server.log" )
111 utilities.assert_equals( expect=main.TRUE, actual=status,
112 onpass="Server started",
113 onfail="Failled to start SimpleHTTPServer" )
114
Jon Hall4f360bc2017-09-07 10:19:52 -0700115 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700116 main.step( "Copying backup config files" )
117 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
118 cp = main.ONOSbench.scp( main.ONOSbench,
119 main.onosServicepath,
120 main.onosServicepath + ".backup",
121 direction="to" )
122
123 utilities.assert_equals( expect=main.TRUE,
124 actual=cp,
125 onpass="Copy backup config file succeeded",
126 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700127
    def setMetadataUrl( self ):
        # NOTE: You should probably backup the config before and reset the config after the test
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Java system property pointing ONOS at the HTTP-served metadata file;
        # the "/" characters are pre-escaped so they survive the sed s/// below.
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # In-place edit of the onos-service script: insert an
        # "export JAVA_OPTS=..." line right after the first "bash" match
        # (the shebang) so ONOS starts with the metadata URI set.
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
                                                                                        main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # First expect consumes the echoed command (it contains metaFile),
        # second expect waits for the shell prompt after sed finishes.
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
149
150 def cleanUpOnosService( self ):
151 # Cleanup custom onos-service file
152 main.ONOSbench.scp( main.ONOSbench,
153 main.onosServicepath + ".backup",
154 main.onosServicepath,
155 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700156
Jon Halla440e872016-03-31 15:15:50 -0700157 def consistentCheck( self ):
158 """
159 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700160
Jon Hallf37d44d2017-05-24 10:37:30 -0700161 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700162 - onosCounters is the parsed json output of the counters command on
163 all nodes
164 - consistent is main.TRUE if all "TestON" counters are consitent across
165 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700166 """
Jon Halle1a3b752015-07-22 13:02:46 -0700167 try:
Jon Halla440e872016-03-31 15:15:50 -0700168 # Get onos counters results
169 onosCountersRaw = []
170 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700171 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700172 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700173 name="counters-" + str( ctrl ),
174 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700175 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700176 'randomTime': True } )
177 threads.append( t )
178 t.start()
179 for t in threads:
180 t.join()
181 onosCountersRaw.append( t.result )
182 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700183 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700184 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700185 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700186 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700187 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700188 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700189 main.log.warn( repr( onosCountersRaw[ i ] ) )
190 onosCounters.append( [] )
191
192 testCounters = {}
193 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700194 # lookes like a dict whose keys are the name of the ONOS node and
195 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700196 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700197 # }
198 # NOTE: There is an assumtion that all nodes are active
199 # based on the above for loops
200 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700201 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700202 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700203 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700204 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700205 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700206 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700207 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700208 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700209 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700210 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
211 if all( tmp ):
212 consistent = main.TRUE
213 else:
214 consistent = main.FALSE
215 main.log.error( "ONOS nodes have different values for counters:\n" +
216 testCounters )
217 return ( onosCounters, consistent )
218 except Exception:
219 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700220 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700221
222 def counterCheck( self, counterName, counterValue ):
223 """
224 Checks that TestON counters are consistent across all nodes and that
225 specified counter is in ONOS with the given value
226 """
227 try:
228 correctResults = main.TRUE
229 # Get onos counters results and consistentCheck
230 onosCounters, consistent = self.consistentCheck()
231 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700232 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700233 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700234 onosValue = None
235 try:
236 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700237 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700238 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700239 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700240 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700241 correctResults = main.FALSE
242 if onosValue == counterValue:
243 main.log.info( counterName + " counter value is correct" )
244 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700245 main.log.error( counterName +
246 " counter value is incorrect," +
247 " expected value: " + str( counterValue ) +
248 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700249 correctResults = main.FALSE
250 return consistent and correctResults
251 except Exception:
252 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700253 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700254
255 def consistentLeaderboards( self, nodes ):
256 TOPIC = 'org.onosproject.election'
257 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700258 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700259 for n in range( 5 ): # Retry in case election is still happening
260 leaderList = []
261 # Get all leaderboards
262 for cli in nodes:
263 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
264 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700265 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700266 leaderList is not None
267 main.log.debug( leaderList )
268 main.log.warn( result )
269 if result:
270 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700271 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700272 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
273 return ( result, leaderList )
274
Devin Lim58046fa2017-07-05 16:55:00 -0700275 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
Jeremy Ronquillo7f8fb572017-11-14 08:28:41 -0800276 # DEPRECATED: ONOSSetup.py now creates these graphs.
277
278 main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
Jon Hallca319892017-06-15 15:25:22 -0700279
    def initialSetUp( self, serviceClean=False ):
        """
        rest of initialSetup

        Optionally starts a packet capture and reverts ONOS service-config
        changes, then verifies all ONOS nodes are up, activates the apps
        listed in the params file, applies ONOS configuration settings, and
        checks app IDs across the cluster.
        """
        # Optional tcpdump capture of the Mininet control traffic
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        # Revert any onos.conf / onos.service edits made earlier in the test
        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       attempts=9 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump the non-ACTIVE components on each node before aborting
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            # Verify each requested app actually reached the ACTIVE state
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            # Apply every component/setting pair from the params file
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700365
Jon Hallca319892017-06-15 15:25:22 -0700366 def commonChecks( self ):
367 # TODO: make this assertable or assert in here?
368 self.topicsCheck()
369 self.partitionsCheck()
370 self.pendingMapCheck()
371 self.appCheck()
372
373 def topicsCheck( self, extraTopics=[] ):
374 """
375 Check for work partition topics in leaders output
376 """
377 leaders = main.Cluster.next().leaders()
378 missing = False
379 try:
380 if leaders:
381 parsedLeaders = json.loads( leaders )
382 output = json.dumps( parsedLeaders,
383 sort_keys=True,
384 indent=4,
385 separators=( ',', ': ' ) )
386 main.log.debug( "Leaders: " + output )
387 # check for all intent partitions
388 topics = []
389 for i in range( 14 ):
390 topics.append( "work-partition-" + str( i ) )
391 topics += extraTopics
392 main.log.debug( topics )
393 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
394 for topic in topics:
395 if topic not in ONOStopics:
396 main.log.error( "Error: " + topic +
397 " not in leaders" )
398 missing = True
399 else:
400 main.log.error( "leaders() returned None" )
401 except ( ValueError, TypeError ):
402 main.log.exception( "Error parsing leaders" )
403 main.log.error( repr( leaders ) )
404 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700405 # NOTE Can we refactor this into the Cluster class?
406 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700407 for ctrl in main.Cluster.active():
408 response = ctrl.CLI.leaders( jsonFormat=False )
409 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
410 str( response ) )
411 return missing
412
413 def partitionsCheck( self ):
414 # TODO: return something assertable
415 partitions = main.Cluster.next().partitions()
416 try:
417 if partitions:
418 parsedPartitions = json.loads( partitions )
419 output = json.dumps( parsedPartitions,
420 sort_keys=True,
421 indent=4,
422 separators=( ',', ': ' ) )
423 main.log.debug( "Partitions: " + output )
424 # TODO check for a leader in all paritions
425 # TODO check for consistency among nodes
426 else:
427 main.log.error( "partitions() returned None" )
428 except ( ValueError, TypeError ):
429 main.log.exception( "Error parsing partitions" )
430 main.log.error( repr( partitions ) )
431
432 def pendingMapCheck( self ):
433 pendingMap = main.Cluster.next().pendingMap()
434 try:
435 if pendingMap:
436 parsedPending = json.loads( pendingMap )
437 output = json.dumps( parsedPending,
438 sort_keys=True,
439 indent=4,
440 separators=( ',', ': ' ) )
441 main.log.debug( "Pending map: " + output )
442 # TODO check something here?
443 else:
444 main.log.error( "pendingMap() returned None" )
445 except ( ValueError, TypeError ):
446 main.log.exception( "Error parsing pending map" )
447 main.log.error( repr( pendingMap ) )
448
449 def appCheck( self ):
450 """
451 Check App IDs on all nodes
452 """
453 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
Jon Hallb9d381e2018-02-05 12:02:10 -0800454 for i in range( 15 ):
455 # TODO modify retry or add a new version that accepts looking for
456 # a value in a return list instead of needing to match the entire
457 # return value to retry
458 appResults = main.Cluster.command( "appToIDCheck" )
459 appCheck = all( i == main.TRUE for i in appResults )
460 if appCheck:
461 break
462 else:
463 time.sleep( 5 )
464
Jon Hallca319892017-06-15 15:25:22 -0700465 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700466 ctrl = main.Cluster.active( 0 )
Jon Hallb9d381e2018-02-05 12:02:10 -0800467 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.pprint( ctrl.apps() ) ) )
468 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.pprint( ctrl.appIDs() ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700469 return appCheck
470
Jon Halle0f0b342017-04-18 11:43:47 -0700471 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
472 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700473 completedValues = main.Cluster.command( "workQueueTotalCompleted",
474 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700475 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700476 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700477 completedResult = all( completedResults )
478 if not completedResult:
479 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
480 workQueueName, completed, completedValues ) )
481
482 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700483 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
484 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700485 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700486 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700487 inProgressResult = all( inProgressResults )
488 if not inProgressResult:
489 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
490 workQueueName, inProgress, inProgressValues ) )
491
492 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700493 pendingValues = main.Cluster.command( "workQueueTotalPending",
494 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700495 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700496 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700497 pendingResult = all( pendingResults )
498 if not pendingResult:
499 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
500 workQueueName, pending, pendingValues ) )
501 return completedResult and inProgressResult and pendingResult
502
    def assignDevices( self, main ):
        """
        Assign devices to controllers

        Points every Mininet switch ( s1..s28 ) at all cluster controller
        IPs via ovs-vsctl, then verifies each switch lists every running
        controller in its connection targets.
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        # NOTE: the switch count 1..28 matches the obelisk topology
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            # Each running controller's IP must appear as a tcp: target
            # in the switch's controller list
            for ctrl in main.Cluster.runningNodes:
                if re.search( "tcp:" + ctrl.ipAddress, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700543
Devin Lim58046fa2017-07-05 16:55:00 -0700544 def assignIntents( self, main ):
545 """
546 Assign intents
547 """
548 import time
549 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700550 assert main, "main not defined"
551 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700552 try:
553 main.HAlabels
554 except ( NameError, AttributeError ):
555 main.log.error( "main.HAlabels not defined, setting to []" )
556 main.HAlabels = []
557 try:
558 main.HAdata
559 except ( NameError, AttributeError ):
560 main.log.error( "data not defined, setting to []" )
561 main.HAdata = []
562 main.case( "Adding host Intents" )
563 main.caseExplanation = "Discover hosts by using pingall then " +\
564 "assign predetermined host-to-host intents." +\
565 " After installation, check that the intent" +\
566 " is distributed to all nodes and the state" +\
567 " is INSTALLED"
568
569 # install onos-app-fwd
570 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700571 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700572 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700573 utilities.assert_equals( expect=main.TRUE, actual=installResults,
574 onpass="Install fwd successful",
575 onfail="Install fwd failed" )
576
577 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700578 appCheck = self.appCheck()
579 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700580 onpass="App Ids seem to be correct",
581 onfail="Something is wrong with app Ids" )
582
583 main.step( "Discovering Hosts( Via pingall for now )" )
584 # FIXME: Once we have a host discovery mechanism, use that instead
585 # REACTIVE FWD test
586 pingResult = main.FALSE
587 passMsg = "Reactive Pingall test passed"
588 time1 = time.time()
589 pingResult = main.Mininet1.pingall()
590 time2 = time.time()
591 if not pingResult:
592 main.log.warn( "First pingall failed. Trying again..." )
593 pingResult = main.Mininet1.pingall()
594 passMsg += " on the second try"
595 utilities.assert_equals(
596 expect=main.TRUE,
597 actual=pingResult,
598 onpass=passMsg,
599 onfail="Reactive Pingall failed, " +
600 "one or more ping pairs failed" )
601 main.log.info( "Time for pingall: %2f seconds" %
602 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700603 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700604 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700605 # timeout for fwd flows
606 time.sleep( 11 )
607 # uninstall onos-app-fwd
608 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700609 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700610 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
611 onpass="Uninstall fwd successful",
612 onfail="Uninstall fwd failed" )
613
614 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700615 appCheck2 = self.appCheck()
616 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700617 onpass="App Ids seem to be correct",
618 onfail="Something is wrong with app Ids" )
619
620 main.step( "Add host intents via cli" )
621 intentIds = []
622 # TODO: move the host numbers to params
623 # Maybe look at all the paths we ping?
624 intentAddResult = True
625 hostResult = main.TRUE
626 for i in range( 8, 18 ):
627 main.log.info( "Adding host intent between h" + str( i ) +
628 " and h" + str( i + 10 ) )
629 host1 = "00:00:00:00:00:" + \
630 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
631 host2 = "00:00:00:00:00:" + \
632 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
633 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700634 host1Dict = onosCli.CLI.getHost( host1 )
635 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700636 host1Id = None
637 host2Id = None
638 if host1Dict and host2Dict:
639 host1Id = host1Dict.get( 'id', None )
640 host2Id = host2Dict.get( 'id', None )
641 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700642 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700643 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700644 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700645 if tmpId:
646 main.log.info( "Added intent with id: " + tmpId )
647 intentIds.append( tmpId )
648 else:
649 main.log.error( "addHostIntent returned: " +
650 repr( tmpId ) )
651 else:
652 main.log.error( "Error, getHost() failed for h" + str( i ) +
653 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700654 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700655 try:
Jon Hallca319892017-06-15 15:25:22 -0700656 output = json.dumps( json.loads( hosts ),
657 sort_keys=True,
658 indent=4,
659 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700660 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700661 output = repr( hosts )
662 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700663 hostResult = main.FALSE
664 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
665 onpass="Found a host id for each host",
666 onfail="Error looking up host ids" )
667
668 intentStart = time.time()
669 onosIds = onosCli.getAllIntentsId()
670 main.log.info( "Submitted intents: " + str( intentIds ) )
671 main.log.info( "Intents in ONOS: " + str( onosIds ) )
672 for intent in intentIds:
673 if intent in onosIds:
674 pass # intent submitted is in onos
675 else:
676 intentAddResult = False
677 if intentAddResult:
678 intentStop = time.time()
679 else:
680 intentStop = None
681 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700682 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700683 intentStates = []
684 installedCheck = True
685 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
686 count = 0
687 try:
688 for intent in json.loads( intents ):
689 state = intent.get( 'state', None )
690 if "INSTALLED" not in state:
691 installedCheck = False
692 intentId = intent.get( 'id', None )
693 intentStates.append( ( intentId, state ) )
694 except ( ValueError, TypeError ):
695 main.log.exception( "Error parsing intents" )
696 # add submitted intents not in the store
697 tmplist = [ i for i, s in intentStates ]
698 missingIntents = False
699 for i in intentIds:
700 if i not in tmplist:
701 intentStates.append( ( i, " - " ) )
702 missingIntents = True
703 intentStates.sort()
704 for i, s in intentStates:
705 count += 1
706 main.log.info( "%-6s%-15s%-15s" %
707 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700708 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700709
710 intentAddResult = bool( intentAddResult and not missingIntents and
711 installedCheck )
712 if not intentAddResult:
713 main.log.error( "Error in pushing host intents to ONOS" )
714
715 main.step( "Intent Anti-Entropy dispersion" )
716 for j in range( 100 ):
717 correct = True
718 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700719 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700720 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700721 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700722 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700723 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700724 str( sorted( onosIds ) ) )
725 if sorted( ids ) != sorted( intentIds ):
726 main.log.warn( "Set of intent IDs doesn't match" )
727 correct = False
728 break
729 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700730 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700731 for intent in intents:
732 if intent[ 'state' ] != "INSTALLED":
733 main.log.warn( "Intent " + intent[ 'id' ] +
734 " is " + intent[ 'state' ] )
735 correct = False
736 break
737 if correct:
738 break
739 else:
740 time.sleep( 1 )
741 if not intentStop:
742 intentStop = time.time()
743 global gossipTime
744 gossipTime = intentStop - intentStart
745 main.log.info( "It took about " + str( gossipTime ) +
746 " seconds for all intents to appear in each node" )
747 append = False
748 title = "Gossip Intents"
749 count = 1
750 while append is False:
751 curTitle = title + str( count )
752 if curTitle not in main.HAlabels:
753 main.HAlabels.append( curTitle )
754 main.HAdata.append( str( gossipTime ) )
755 append = True
756 else:
757 count += 1
758 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700759 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700760 utilities.assert_greater_equals(
761 expect=maxGossipTime, actual=gossipTime,
762 onpass="ECM anti-entropy for intents worked within " +
763 "expected time",
764 onfail="Intent ECM anti-entropy took too long. " +
765 "Expected time:{}, Actual time:{}".format( maxGossipTime,
766 gossipTime ) )
767 if gossipTime <= maxGossipTime:
768 intentAddResult = True
769
Jon Hallca319892017-06-15 15:25:22 -0700770 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700771 if not intentAddResult or "key" in pendingMap:
Devin Lim58046fa2017-07-05 16:55:00 -0700772 installedCheck = True
773 main.log.info( "Sleeping 60 seconds to see if intents are found" )
774 time.sleep( 60 )
775 onosIds = onosCli.getAllIntentsId()
776 main.log.info( "Submitted intents: " + str( intentIds ) )
777 main.log.info( "Intents in ONOS: " + str( onosIds ) )
778 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700779 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700780 intentStates = []
781 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
782 count = 0
783 try:
784 for intent in json.loads( intents ):
785 # Iter through intents of a node
786 state = intent.get( 'state', None )
787 if "INSTALLED" not in state:
788 installedCheck = False
789 intentId = intent.get( 'id', None )
790 intentStates.append( ( intentId, state ) )
791 except ( ValueError, TypeError ):
792 main.log.exception( "Error parsing intents" )
793 # add submitted intents not in the store
794 tmplist = [ i for i, s in intentStates ]
795 for i in intentIds:
796 if i not in tmplist:
797 intentStates.append( ( i, " - " ) )
798 intentStates.sort()
799 for i, s in intentStates:
800 count += 1
801 main.log.info( "%-6s%-15s%-15s" %
802 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700803 self.topicsCheck( [ "org.onosproject.election" ] )
804 self.partitionsCheck()
805 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700806
Jon Hallca319892017-06-15 15:25:22 -0700807 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700808 """
809 Ping across added host intents
810 """
811 import json
812 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700813 assert main, "main not defined"
814 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700815 main.case( "Verify connectivity by sending traffic across Intents" )
816 main.caseExplanation = "Ping across added host intents to check " +\
817 "functionality and check the state of " +\
818 "the intent"
819
Jon Hallca319892017-06-15 15:25:22 -0700820 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700821 main.step( "Check Intent state" )
822 installedCheck = False
823 loopCount = 0
Jon Hall5d5876e2017-11-30 09:33:16 -0800824 while not installedCheck and loopCount < 90:
Devin Lim58046fa2017-07-05 16:55:00 -0700825 installedCheck = True
826 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700827 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700828 intentStates = []
829 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
830 count = 0
831 # Iter through intents of a node
832 try:
833 for intent in json.loads( intents ):
834 state = intent.get( 'state', None )
835 if "INSTALLED" not in state:
836 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700837 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700838 intentId = intent.get( 'id', None )
839 intentStates.append( ( intentId, state ) )
840 except ( ValueError, TypeError ):
841 main.log.exception( "Error parsing intents." )
842 # Print states
843 intentStates.sort()
844 for i, s in intentStates:
845 count += 1
846 main.log.info( "%-6s%-15s%-15s" %
847 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700848 if not installedCheck:
849 time.sleep( 1 )
850 loopCount += 1
851 utilities.assert_equals( expect=True, actual=installedCheck,
852 onpass="Intents are all INSTALLED",
853 onfail="Intents are not all in " +
854 "INSTALLED state" )
855
856 main.step( "Ping across added host intents" )
857 PingResult = main.TRUE
858 for i in range( 8, 18 ):
859 ping = main.Mininet1.pingHost( src="h" + str( i ),
860 target="h" + str( i + 10 ) )
861 PingResult = PingResult and ping
862 if ping == main.FALSE:
863 main.log.warn( "Ping failed between h" + str( i ) +
864 " and h" + str( i + 10 ) )
865 elif ping == main.TRUE:
866 main.log.info( "Ping test passed!" )
867 # Don't set PingResult or you'd override failures
868 if PingResult == main.FALSE:
869 main.log.error(
870 "Intents have not been installed correctly, pings failed." )
871 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700872 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700873 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700874 output = json.dumps( json.loads( tmpIntents ),
875 sort_keys=True,
876 indent=4,
877 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700878 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700879 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700880 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700881 utilities.assert_equals(
882 expect=main.TRUE,
883 actual=PingResult,
884 onpass="Intents have been installed correctly and pings work",
885 onfail="Intents have not been installed correctly, pings failed." )
886
887 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700888 topicsCheck = self.topicsCheck()
889 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700890 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700891 onfail="Some topics were lost" )
892 self.partitionsCheck()
893 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700894
895 if not installedCheck:
896 main.log.info( "Waiting 60 seconds to see if the state of " +
897 "intents change" )
898 time.sleep( 60 )
899 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700900 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700901 intentStates = []
902 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
903 count = 0
904 # Iter through intents of a node
905 try:
906 for intent in json.loads( intents ):
907 state = intent.get( 'state', None )
908 if "INSTALLED" not in state:
909 installedCheck = False
910 intentId = intent.get( 'id', None )
911 intentStates.append( ( intentId, state ) )
912 except ( ValueError, TypeError ):
913 main.log.exception( "Error parsing intents." )
914 intentStates.sort()
915 for i, s in intentStates:
916 count += 1
917 main.log.info( "%-6s%-15s%-15s" %
918 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700919 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700920
Devin Lim58046fa2017-07-05 16:55:00 -0700921 # Print flowrules
Devin Lime9f0ccf2017-08-11 17:25:12 -0700922 main.log.debug( onosCli.CLI.flows() )
Devin Lim58046fa2017-07-05 16:55:00 -0700923 main.step( "Wait a minute then ping again" )
924 # the wait is above
925 PingResult = main.TRUE
926 for i in range( 8, 18 ):
927 ping = main.Mininet1.pingHost( src="h" + str( i ),
928 target="h" + str( i + 10 ) )
929 PingResult = PingResult and ping
930 if ping == main.FALSE:
931 main.log.warn( "Ping failed between h" + str( i ) +
932 " and h" + str( i + 10 ) )
933 elif ping == main.TRUE:
934 main.log.info( "Ping test passed!" )
935 # Don't set PingResult or you'd override failures
936 if PingResult == main.FALSE:
937 main.log.error(
938 "Intents have not been installed correctly, pings failed." )
939 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700940 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700941 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700942 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700943 main.log.warn( json.dumps( json.loads( tmpIntents ),
944 sort_keys=True,
945 indent=4,
946 separators=( ',', ': ' ) ) )
947 except ( ValueError, TypeError ):
948 main.log.warn( repr( tmpIntents ) )
949 utilities.assert_equals(
950 expect=main.TRUE,
951 actual=PingResult,
952 onpass="Intents have been installed correctly and pings work",
953 onfail="Intents have not been installed correctly, pings failed." )
954
Devin Lim142b5342017-07-20 15:22:39 -0700955 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700956 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700957 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700958 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700959 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700960 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700961 actual=rolesNotNull,
962 onpass="Each device has a master",
963 onfail="Some devices don't have a master assigned" )
964
Devin Lim142b5342017-07-20 15:22:39 -0700965 def checkTheRole( self ):
966 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700967 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700968 consistentMastership = True
969 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700970 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700971 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700972 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700973 main.log.error( "Error in getting " + node + " roles" )
974 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -0700975 repr( ONOSMastership[ i ] ) )
976 rolesResults = False
977 utilities.assert_equals(
978 expect=True,
979 actual=rolesResults,
980 onpass="No error in reading roles output",
981 onfail="Error in reading roles from ONOS" )
982
983 main.step( "Check for consistency in roles from each controller" )
984 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
985 main.log.info(
986 "Switch roles are consistent across all ONOS nodes" )
987 else:
988 consistentMastership = False
989 utilities.assert_equals(
990 expect=True,
991 actual=consistentMastership,
992 onpass="Switch roles are consistent across all ONOS nodes",
993 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -0700994 return ONOSMastership, rolesResults, consistentMastership
995
996 def checkingIntents( self ):
997 main.step( "Get the intents from each controller" )
998 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
999 intentsResults = True
1000 for i in range( len( ONOSIntents ) ):
1001 node = str( main.Cluster.active( i ) )
1002 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1003 main.log.error( "Error in getting " + node + " intents" )
1004 main.log.warn( node + " intents response: " +
1005 repr( ONOSIntents[ i ] ) )
1006 intentsResults = False
1007 utilities.assert_equals(
1008 expect=True,
1009 actual=intentsResults,
1010 onpass="No error in reading intents output",
1011 onfail="Error in reading intents from ONOS" )
1012 return ONOSIntents, intentsResults
1013
1014 def readingState( self, main ):
1015 """
1016 Reading state of ONOS
1017 """
1018 import json
Devin Lim142b5342017-07-20 15:22:39 -07001019 assert main, "main not defined"
1020 assert utilities.assert_equals, "utilities.assert_equals not defined"
1021 try:
1022 from tests.dependencies.topology import Topology
1023 except ImportError:
1024 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001025 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001026 try:
1027 main.topoRelated
1028 except ( NameError, AttributeError ):
1029 main.topoRelated = Topology()
1030 main.case( "Setting up and gathering data for current state" )
1031 # The general idea for this test case is to pull the state of
1032 # ( intents,flows, topology,... ) from each ONOS node
1033 # We can then compare them with each other and also with past states
1034
1035 global mastershipState
1036 mastershipState = '[]'
1037
1038 self.checkRoleNotNull()
1039
1040 main.step( "Get the Mastership of each switch from each controller" )
1041 mastershipCheck = main.FALSE
1042
1043 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001044
1045 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001046 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001047 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001048 try:
1049 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001050 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001051 json.dumps(
1052 json.loads( ONOSMastership[ i ] ),
1053 sort_keys=True,
1054 indent=4,
1055 separators=( ',', ': ' ) ) )
1056 except ( ValueError, TypeError ):
1057 main.log.warn( repr( ONOSMastership[ i ] ) )
1058 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001059 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001060 mastershipState = ONOSMastership[ 0 ]
1061
Devin Lim58046fa2017-07-05 16:55:00 -07001062 global intentState
1063 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001064 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001065 intentCheck = main.FALSE
1066 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001067
Devin Lim58046fa2017-07-05 16:55:00 -07001068 main.step( "Check for consistency in Intents from each controller" )
1069 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1070 main.log.info( "Intents are consistent across all ONOS " +
1071 "nodes" )
1072 else:
1073 consistentIntents = False
1074 main.log.error( "Intents not consistent" )
1075 utilities.assert_equals(
1076 expect=True,
1077 actual=consistentIntents,
1078 onpass="Intents are consistent across all ONOS nodes",
1079 onfail="ONOS nodes have different views of intents" )
1080
1081 if intentsResults:
1082 # Try to make it easy to figure out what is happening
1083 #
1084 # Intent ONOS1 ONOS2 ...
1085 # 0x01 INSTALLED INSTALLING
1086 # ... ... ...
1087 # ... ... ...
1088 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001089 for ctrl in main.Cluster.active():
1090 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001091 main.log.warn( title )
1092 # get all intent keys in the cluster
1093 keys = []
1094 try:
1095 # Get the set of all intent keys
1096 for nodeStr in ONOSIntents:
1097 node = json.loads( nodeStr )
1098 for intent in node:
1099 keys.append( intent.get( 'id' ) )
1100 keys = set( keys )
1101 # For each intent key, print the state on each node
1102 for key in keys:
1103 row = "%-13s" % key
1104 for nodeStr in ONOSIntents:
1105 node = json.loads( nodeStr )
1106 for intent in node:
1107 if intent.get( 'id', "Error" ) == key:
1108 row += "%-15s" % intent.get( 'state' )
1109 main.log.warn( row )
1110 # End of intent state table
1111 except ValueError as e:
1112 main.log.exception( e )
1113 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1114
1115 if intentsResults and not consistentIntents:
1116 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001117 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001118 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1119 sort_keys=True,
1120 indent=4,
1121 separators=( ',', ': ' ) ) )
1122 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001123 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001124 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001125 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001126 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1127 sort_keys=True,
1128 indent=4,
1129 separators=( ',', ': ' ) ) )
1130 else:
Jon Hallca319892017-06-15 15:25:22 -07001131 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001132 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001133 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001134 intentState = ONOSIntents[ 0 ]
1135
1136 main.step( "Get the flows from each controller" )
1137 global flowState
1138 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001139 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001140 ONOSFlowsJson = []
1141 flowCheck = main.FALSE
1142 consistentFlows = True
1143 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001144 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001145 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001146 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001147 main.log.error( "Error in getting " + node + " flows" )
1148 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001149 repr( ONOSFlows[ i ] ) )
1150 flowsResults = False
1151 ONOSFlowsJson.append( None )
1152 else:
1153 try:
1154 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1155 except ( ValueError, TypeError ):
1156 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001157 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001158 " response as json." )
1159 main.log.error( repr( ONOSFlows[ i ] ) )
1160 ONOSFlowsJson.append( None )
1161 flowsResults = False
1162 utilities.assert_equals(
1163 expect=True,
1164 actual=flowsResults,
1165 onpass="No error in reading flows output",
1166 onfail="Error in reading flows from ONOS" )
1167
1168 main.step( "Check for consistency in Flows from each controller" )
1169 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1170 if all( tmp ):
1171 main.log.info( "Flow count is consistent across all ONOS nodes" )
1172 else:
1173 consistentFlows = False
1174 utilities.assert_equals(
1175 expect=True,
1176 actual=consistentFlows,
1177 onpass="The flow count is consistent across all ONOS nodes",
1178 onfail="ONOS nodes have different flow counts" )
1179
1180 if flowsResults and not consistentFlows:
1181 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001182 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001183 try:
1184 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001185 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001186 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1187 indent=4, separators=( ',', ': ' ) ) )
1188 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001189 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001190 repr( ONOSFlows[ i ] ) )
1191 elif flowsResults and consistentFlows:
1192 flowCheck = main.TRUE
1193 flowState = ONOSFlows[ 0 ]
1194
1195 main.step( "Get the OF Table entries" )
1196 global flows
1197 flows = []
1198 for i in range( 1, 29 ):
1199 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1200 if flowCheck == main.FALSE:
1201 for table in flows:
1202 main.log.warn( table )
1203 # TODO: Compare switch flow tables with ONOS flow tables
1204
1205 main.step( "Start continuous pings" )
1206 main.Mininet2.pingLong(
1207 src=main.params[ 'PING' ][ 'source1' ],
1208 target=main.params[ 'PING' ][ 'target1' ],
1209 pingTime=500 )
1210 main.Mininet2.pingLong(
1211 src=main.params[ 'PING' ][ 'source2' ],
1212 target=main.params[ 'PING' ][ 'target2' ],
1213 pingTime=500 )
1214 main.Mininet2.pingLong(
1215 src=main.params[ 'PING' ][ 'source3' ],
1216 target=main.params[ 'PING' ][ 'target3' ],
1217 pingTime=500 )
1218 main.Mininet2.pingLong(
1219 src=main.params[ 'PING' ][ 'source4' ],
1220 target=main.params[ 'PING' ][ 'target4' ],
1221 pingTime=500 )
1222 main.Mininet2.pingLong(
1223 src=main.params[ 'PING' ][ 'source5' ],
1224 target=main.params[ 'PING' ][ 'target5' ],
1225 pingTime=500 )
1226 main.Mininet2.pingLong(
1227 src=main.params[ 'PING' ][ 'source6' ],
1228 target=main.params[ 'PING' ][ 'target6' ],
1229 pingTime=500 )
1230 main.Mininet2.pingLong(
1231 src=main.params[ 'PING' ][ 'source7' ],
1232 target=main.params[ 'PING' ][ 'target7' ],
1233 pingTime=500 )
1234 main.Mininet2.pingLong(
1235 src=main.params[ 'PING' ][ 'source8' ],
1236 target=main.params[ 'PING' ][ 'target8' ],
1237 pingTime=500 )
1238 main.Mininet2.pingLong(
1239 src=main.params[ 'PING' ][ 'source9' ],
1240 target=main.params[ 'PING' ][ 'target9' ],
1241 pingTime=500 )
1242 main.Mininet2.pingLong(
1243 src=main.params[ 'PING' ][ 'source10' ],
1244 target=main.params[ 'PING' ][ 'target10' ],
1245 pingTime=500 )
1246
1247 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001248 devices = main.topoRelated.getAll( "devices" )
1249 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1250 ports = main.topoRelated.getAll( "ports" )
1251 links = main.topoRelated.getAll( "links" )
1252 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001253 # Compare json objects for hosts and dataplane clusters
1254
1255 # hosts
1256 main.step( "Host view is consistent across ONOS nodes" )
1257 consistentHostsResult = main.TRUE
1258 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001259 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001260 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1261 if hosts[ controller ] == hosts[ 0 ]:
1262 continue
1263 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001264 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001265 controllerStr +
1266 " is inconsistent with ONOS1" )
1267 main.log.warn( repr( hosts[ controller ] ) )
1268 consistentHostsResult = main.FALSE
1269
1270 else:
Jon Hallca319892017-06-15 15:25:22 -07001271 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001272 controllerStr )
1273 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001274 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001275 " hosts response: " +
1276 repr( hosts[ controller ] ) )
1277 utilities.assert_equals(
1278 expect=main.TRUE,
1279 actual=consistentHostsResult,
1280 onpass="Hosts view is consistent across all ONOS nodes",
1281 onfail="ONOS nodes have different views of hosts" )
1282
1283 main.step( "Each host has an IP address" )
1284 ipResult = main.TRUE
1285 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001286 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001287 if hosts[ controller ]:
1288 for host in hosts[ controller ]:
1289 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001290 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001291 controllerStr + ": " + str( host ) )
1292 ipResult = main.FALSE
1293 utilities.assert_equals(
1294 expect=main.TRUE,
1295 actual=ipResult,
1296 onpass="The ips of the hosts aren't empty",
1297 onfail="The ip of at least one host is missing" )
1298
1299 # Strongly connected clusters of devices
1300 main.step( "Cluster view is consistent across ONOS nodes" )
1301 consistentClustersResult = main.TRUE
1302 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001303 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001304 if "Error" not in clusters[ controller ]:
1305 if clusters[ controller ] == clusters[ 0 ]:
1306 continue
1307 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001308 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001309 " is inconsistent with ONOS1" )
1310 consistentClustersResult = main.FALSE
1311
1312 else:
1313 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001314 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001315 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001316 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001317 " clusters response: " +
1318 repr( clusters[ controller ] ) )
1319 utilities.assert_equals(
1320 expect=main.TRUE,
1321 actual=consistentClustersResult,
1322 onpass="Clusters view is consistent across all ONOS nodes",
1323 onfail="ONOS nodes have different views of clusters" )
1324 if not consistentClustersResult:
1325 main.log.debug( clusters )
1326
1327 # there should always only be one cluster
1328 main.step( "Cluster view correct across ONOS nodes" )
1329 try:
1330 numClusters = len( json.loads( clusters[ 0 ] ) )
1331 except ( ValueError, TypeError ):
1332 main.log.exception( "Error parsing clusters[0]: " +
1333 repr( clusters[ 0 ] ) )
1334 numClusters = "ERROR"
1335 utilities.assert_equals(
1336 expect=1,
1337 actual=numClusters,
1338 onpass="ONOS shows 1 SCC",
1339 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1340
1341 main.step( "Comparing ONOS topology to MN" )
1342 devicesResults = main.TRUE
1343 linksResults = main.TRUE
1344 hostsResults = main.TRUE
1345 mnSwitches = main.Mininet1.getSwitches()
1346 mnLinks = main.Mininet1.getLinks()
1347 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001348 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001349 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001350 currentDevicesResult = main.topoRelated.compareDevicePort(
1351 main.Mininet1, controller,
1352 mnSwitches, devices, ports )
1353 utilities.assert_equals( expect=main.TRUE,
1354 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001355 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001356 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001357 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001358 " Switches view is incorrect" )
1359
1360 currentLinksResult = main.topoRelated.compareBase( links, controller,
1361 main.Mininet1.compareLinks,
1362 [ mnSwitches, mnLinks ] )
1363 utilities.assert_equals( expect=main.TRUE,
1364 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001365 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001366 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001367 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001368 " links view is incorrect" )
1369
1370 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1371 currentHostsResult = main.Mininet1.compareHosts(
1372 mnHosts,
1373 hosts[ controller ] )
1374 else:
1375 currentHostsResult = main.FALSE
1376 utilities.assert_equals( expect=main.TRUE,
1377 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001378 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001379 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001380 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001381 " hosts don't match Mininet" )
1382
1383 devicesResults = devicesResults and currentDevicesResult
1384 linksResults = linksResults and currentLinksResult
1385 hostsResults = hostsResults and currentHostsResult
1386
1387 main.step( "Device information is correct" )
1388 utilities.assert_equals(
1389 expect=main.TRUE,
1390 actual=devicesResults,
1391 onpass="Device information is correct",
1392 onfail="Device information is incorrect" )
1393
1394 main.step( "Links are correct" )
1395 utilities.assert_equals(
1396 expect=main.TRUE,
1397 actual=linksResults,
1398 onpass="Link are correct",
1399 onfail="Links are incorrect" )
1400
1401 main.step( "Hosts are correct" )
1402 utilities.assert_equals(
1403 expect=main.TRUE,
1404 actual=hostsResults,
1405 onpass="Hosts are correct",
1406 onfail="Hosts are incorrect" )
1407
1408 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001409 """
1410 Check for basic functionality with distributed primitives
1411 """
Jon Halle0f0b342017-04-18 11:43:47 -07001412 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001413 try:
1414 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001415 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001416 assert main.pCounterName, "main.pCounterName not defined"
1417 assert main.onosSetName, "main.onosSetName not defined"
1418 # NOTE: assert fails if value is 0/None/Empty/False
1419 try:
1420 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001421 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001422 main.log.error( "main.pCounterValue not defined, setting to 0" )
1423 main.pCounterValue = 0
1424 try:
1425 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001426 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001427 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001428 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001429 # Variables for the distributed primitives tests. These are local only
1430 addValue = "a"
1431 addAllValue = "a b c d e f"
1432 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001433 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001434 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001435 workQueueName = "TestON-Queue"
1436 workQueueCompleted = 0
1437 workQueueInProgress = 0
1438 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001439
1440 description = "Check for basic functionality with distributed " +\
1441 "primitives"
1442 main.case( description )
1443 main.caseExplanation = "Test the methods of the distributed " +\
1444 "primitives (counters and sets) throught the cli"
1445 # DISTRIBUTED ATOMIC COUNTERS
1446 # Partitioned counters
1447 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001448 pCounters = main.Cluster.command( "counterTestAddAndGet",
1449 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001450 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001451 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001452 main.pCounterValue += 1
1453 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001454 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001455 pCounterResults = True
1456 for i in addedPValues:
1457 tmpResult = i in pCounters
1458 pCounterResults = pCounterResults and tmpResult
1459 if not tmpResult:
1460 main.log.error( str( i ) + " is not in partitioned "
1461 "counter incremented results" )
1462 utilities.assert_equals( expect=True,
1463 actual=pCounterResults,
1464 onpass="Default counter incremented",
1465 onfail="Error incrementing default" +
1466 " counter" )
1467
1468 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001469 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1470 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001471 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001472 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001473 addedPValues.append( main.pCounterValue )
1474 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001475 # Check that counter incremented numController times
1476 pCounterResults = True
1477 for i in addedPValues:
1478 tmpResult = i in pCounters
1479 pCounterResults = pCounterResults and tmpResult
1480 if not tmpResult:
1481 main.log.error( str( i ) + " is not in partitioned "
1482 "counter incremented results" )
1483 utilities.assert_equals( expect=True,
1484 actual=pCounterResults,
1485 onpass="Default counter incremented",
1486 onfail="Error incrementing default" +
1487 " counter" )
1488
1489 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001490 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001491 utilities.assert_equals( expect=main.TRUE,
1492 actual=incrementCheck,
1493 onpass="Added counters are correct",
1494 onfail="Added counters are incorrect" )
1495
1496 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001497 pCounters = main.Cluster.command( "counterTestAddAndGet",
1498 args=[ main.pCounterName ],
1499 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001500 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001501 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001502 main.pCounterValue += -8
1503 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001504 # Check that counter incremented numController times
1505 pCounterResults = True
1506 for i in addedPValues:
1507 tmpResult = i in pCounters
1508 pCounterResults = pCounterResults and tmpResult
1509 if not tmpResult:
1510 main.log.error( str( i ) + " is not in partitioned "
1511 "counter incremented results" )
1512 utilities.assert_equals( expect=True,
1513 actual=pCounterResults,
1514 onpass="Default counter incremented",
1515 onfail="Error incrementing default" +
1516 " counter" )
1517
1518 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001519 pCounters = main.Cluster.command( "counterTestAddAndGet",
1520 args=[ main.pCounterName ],
1521 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001522 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001523 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001524 main.pCounterValue += 5
1525 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001526
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001527 # Check that counter incremented numController times
1528 pCounterResults = True
1529 for i in addedPValues:
1530 tmpResult = i in pCounters
1531 pCounterResults = pCounterResults and tmpResult
1532 if not tmpResult:
1533 main.log.error( str( i ) + " is not in partitioned "
1534 "counter incremented results" )
1535 utilities.assert_equals( expect=True,
1536 actual=pCounterResults,
1537 onpass="Default counter incremented",
1538 onfail="Error incrementing default" +
1539 " counter" )
1540
1541 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001542 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1543 args=[ main.pCounterName ],
1544 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001545 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001546 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001547 addedPValues.append( main.pCounterValue )
1548 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001549 # Check that counter incremented numController times
1550 pCounterResults = True
1551 for i in addedPValues:
1552 tmpResult = i in pCounters
1553 pCounterResults = pCounterResults and tmpResult
1554 if not tmpResult:
1555 main.log.error( str( i ) + " is not in partitioned "
1556 "counter incremented results" )
1557 utilities.assert_equals( expect=True,
1558 actual=pCounterResults,
1559 onpass="Default counter incremented",
1560 onfail="Error incrementing default" +
1561 " counter" )
1562
1563 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001564 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001565 utilities.assert_equals( expect=main.TRUE,
1566 actual=incrementCheck,
1567 onpass="Added counters are correct",
1568 onfail="Added counters are incorrect" )
1569
1570 # DISTRIBUTED SETS
1571 main.step( "Distributed Set get" )
1572 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001573 getResponses = main.Cluster.command( "setTestGet",
1574 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001575 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001576 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001577 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001578 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001579 current = set( getResponses[ i ] )
1580 if len( current ) == len( getResponses[ i ] ):
1581 # no repeats
1582 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001583 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001584 " has incorrect view" +
1585 " of set " + main.onosSetName + ":\n" +
1586 str( getResponses[ i ] ) )
1587 main.log.debug( "Expected: " + str( main.onosSet ) )
1588 main.log.debug( "Actual: " + str( current ) )
1589 getResults = main.FALSE
1590 else:
1591 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001592 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001593 " has repeat elements in" +
1594 " set " + main.onosSetName + ":\n" +
1595 str( getResponses[ i ] ) )
1596 getResults = main.FALSE
1597 elif getResponses[ i ] == main.ERROR:
1598 getResults = main.FALSE
1599 utilities.assert_equals( expect=main.TRUE,
1600 actual=getResults,
1601 onpass="Set elements are correct",
1602 onfail="Set elements are incorrect" )
1603
1604 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001605 sizeResponses = main.Cluster.command( "setTestSize",
1606 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001607 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001608 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001609 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001610 if size != sizeResponses[ i ]:
1611 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001612 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001613 " expected a size of " + str( size ) +
1614 " for set " + main.onosSetName +
1615 " but got " + str( sizeResponses[ i ] ) )
1616 utilities.assert_equals( expect=main.TRUE,
1617 actual=sizeResults,
1618 onpass="Set sizes are correct",
1619 onfail="Set sizes are incorrect" )
1620
1621 main.step( "Distributed Set add()" )
1622 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001623 addResponses = main.Cluster.command( "setTestAdd",
1624 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001625 # main.TRUE = successfully changed the set
1626 # main.FALSE = action resulted in no change in set
1627 # main.ERROR - Some error in executing the function
1628 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001629 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001630 if addResponses[ i ] == main.TRUE:
1631 # All is well
1632 pass
1633 elif addResponses[ i ] == main.FALSE:
1634 # Already in set, probably fine
1635 pass
1636 elif addResponses[ i ] == main.ERROR:
1637 # Error in execution
1638 addResults = main.FALSE
1639 else:
1640 # unexpected result
1641 addResults = main.FALSE
1642 if addResults != main.TRUE:
1643 main.log.error( "Error executing set add" )
1644
1645 # Check if set is still correct
1646 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001647 getResponses = main.Cluster.command( "setTestGet",
1648 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001649 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001650 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001651 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001652 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001653 current = set( getResponses[ i ] )
1654 if len( current ) == len( getResponses[ i ] ):
1655 # no repeats
1656 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001657 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001658 " of set " + main.onosSetName + ":\n" +
1659 str( getResponses[ i ] ) )
1660 main.log.debug( "Expected: " + str( main.onosSet ) )
1661 main.log.debug( "Actual: " + str( current ) )
1662 getResults = main.FALSE
1663 else:
1664 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001665 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001666 " set " + main.onosSetName + ":\n" +
1667 str( getResponses[ i ] ) )
1668 getResults = main.FALSE
1669 elif getResponses[ i ] == main.ERROR:
1670 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001671 sizeResponses = main.Cluster.command( "setTestSize",
1672 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001673 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001674 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001675 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001676 if size != sizeResponses[ i ]:
1677 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001678 main.log.error( node + " expected a size of " +
1679 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001680 " but got " + str( sizeResponses[ i ] ) )
1681 addResults = addResults and getResults and sizeResults
1682 utilities.assert_equals( expect=main.TRUE,
1683 actual=addResults,
1684 onpass="Set add correct",
1685 onfail="Set add was incorrect" )
1686
1687 main.step( "Distributed Set addAll()" )
1688 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001689 addResponses = main.Cluster.command( "setTestAdd",
1690 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001691 # main.TRUE = successfully changed the set
1692 # main.FALSE = action resulted in no change in set
1693 # main.ERROR - Some error in executing the function
1694 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001695 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001696 if addResponses[ i ] == main.TRUE:
1697 # All is well
1698 pass
1699 elif addResponses[ i ] == main.FALSE:
1700 # Already in set, probably fine
1701 pass
1702 elif addResponses[ i ] == main.ERROR:
1703 # Error in execution
1704 addAllResults = main.FALSE
1705 else:
1706 # unexpected result
1707 addAllResults = main.FALSE
1708 if addAllResults != main.TRUE:
1709 main.log.error( "Error executing set addAll" )
1710
1711 # Check if set is still correct
1712 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001713 getResponses = main.Cluster.command( "setTestGet",
1714 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001715 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001716 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001717 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001718 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001719 current = set( getResponses[ i ] )
1720 if len( current ) == len( getResponses[ i ] ):
1721 # no repeats
1722 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001723 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001724 " of set " + main.onosSetName + ":\n" +
1725 str( getResponses[ i ] ) )
1726 main.log.debug( "Expected: " + str( main.onosSet ) )
1727 main.log.debug( "Actual: " + str( current ) )
1728 getResults = main.FALSE
1729 else:
1730 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001731 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001732 " set " + main.onosSetName + ":\n" +
1733 str( getResponses[ i ] ) )
1734 getResults = main.FALSE
1735 elif getResponses[ i ] == main.ERROR:
1736 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001737 sizeResponses = main.Cluster.command( "setTestSize",
1738 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001739 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001740 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001741 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001742 if size != sizeResponses[ i ]:
1743 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001744 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001745 " for set " + main.onosSetName +
1746 " but got " + str( sizeResponses[ i ] ) )
1747 addAllResults = addAllResults and getResults and sizeResults
1748 utilities.assert_equals( expect=main.TRUE,
1749 actual=addAllResults,
1750 onpass="Set addAll correct",
1751 onfail="Set addAll was incorrect" )
1752
1753 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001754 containsResponses = main.Cluster.command( "setTestGet",
1755 args=[ main.onosSetName ],
1756 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001757 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001758 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001759 if containsResponses[ i ] == main.ERROR:
1760 containsResults = main.FALSE
1761 else:
1762 containsResults = containsResults and\
1763 containsResponses[ i ][ 1 ]
1764 utilities.assert_equals( expect=main.TRUE,
1765 actual=containsResults,
1766 onpass="Set contains is functional",
1767 onfail="Set contains failed" )
1768
1769 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001770 containsAllResponses = main.Cluster.command( "setTestGet",
1771 args=[ main.onosSetName ],
1772 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001773 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001774 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001775 if containsResponses[ i ] == main.ERROR:
1776 containsResults = main.FALSE
1777 else:
1778 containsResults = containsResults and\
1779 containsResponses[ i ][ 1 ]
1780 utilities.assert_equals( expect=main.TRUE,
1781 actual=containsAllResults,
1782 onpass="Set containsAll is functional",
1783 onfail="Set containsAll failed" )
1784
1785 main.step( "Distributed Set remove()" )
1786 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001787 removeResponses = main.Cluster.command( "setTestRemove",
1788 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001789 # main.TRUE = successfully changed the set
1790 # main.FALSE = action resulted in no change in set
1791 # main.ERROR - Some error in executing the function
1792 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001793 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001794 if removeResponses[ i ] == main.TRUE:
1795 # All is well
1796 pass
1797 elif removeResponses[ i ] == main.FALSE:
1798 # not in set, probably fine
1799 pass
1800 elif removeResponses[ i ] == main.ERROR:
1801 # Error in execution
1802 removeResults = main.FALSE
1803 else:
1804 # unexpected result
1805 removeResults = main.FALSE
1806 if removeResults != main.TRUE:
1807 main.log.error( "Error executing set remove" )
1808
1809 # Check if set is still correct
1810 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001811 getResponses = main.Cluster.command( "setTestGet",
1812 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001813 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001814 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001815 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001816 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001817 current = set( getResponses[ i ] )
1818 if len( current ) == len( getResponses[ i ] ):
1819 # no repeats
1820 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001821 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001822 " of set " + main.onosSetName + ":\n" +
1823 str( getResponses[ i ] ) )
1824 main.log.debug( "Expected: " + str( main.onosSet ) )
1825 main.log.debug( "Actual: " + str( current ) )
1826 getResults = main.FALSE
1827 else:
1828 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001829 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001830 " set " + main.onosSetName + ":\n" +
1831 str( getResponses[ i ] ) )
1832 getResults = main.FALSE
1833 elif getResponses[ i ] == main.ERROR:
1834 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001835 sizeResponses = main.Cluster.command( "setTestSize",
1836 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001837 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001838 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001839 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001840 if size != sizeResponses[ i ]:
1841 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001842 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001843 " for set " + main.onosSetName +
1844 " but got " + str( sizeResponses[ i ] ) )
1845 removeResults = removeResults and getResults and sizeResults
1846 utilities.assert_equals( expect=main.TRUE,
1847 actual=removeResults,
1848 onpass="Set remove correct",
1849 onfail="Set remove was incorrect" )
1850
1851 main.step( "Distributed Set removeAll()" )
1852 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001853 removeAllResponses = main.Cluster.command( "setTestRemove",
1854 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001855 # main.TRUE = successfully changed the set
1856 # main.FALSE = action resulted in no change in set
1857 # main.ERROR - Some error in executing the function
1858 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001859 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001860 if removeAllResponses[ i ] == main.TRUE:
1861 # All is well
1862 pass
1863 elif removeAllResponses[ i ] == main.FALSE:
1864 # not in set, probably fine
1865 pass
1866 elif removeAllResponses[ i ] == main.ERROR:
1867 # Error in execution
1868 removeAllResults = main.FALSE
1869 else:
1870 # unexpected result
1871 removeAllResults = main.FALSE
1872 if removeAllResults != main.TRUE:
1873 main.log.error( "Error executing set removeAll" )
1874
1875 # Check if set is still correct
1876 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001877 getResponses = main.Cluster.command( "setTestGet",
1878 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001879 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001880 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001881 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001882 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001883 current = set( getResponses[ i ] )
1884 if len( current ) == len( getResponses[ i ] ):
1885 # no repeats
1886 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001887 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001888 " of set " + main.onosSetName + ":\n" +
1889 str( getResponses[ i ] ) )
1890 main.log.debug( "Expected: " + str( main.onosSet ) )
1891 main.log.debug( "Actual: " + str( current ) )
1892 getResults = main.FALSE
1893 else:
1894 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001895 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001896 " set " + main.onosSetName + ":\n" +
1897 str( getResponses[ i ] ) )
1898 getResults = main.FALSE
1899 elif getResponses[ i ] == main.ERROR:
1900 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001901 sizeResponses = main.Cluster.command( "setTestSize",
1902 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001903 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001904 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001905 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001906 if size != sizeResponses[ i ]:
1907 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001908 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001909 " for set " + main.onosSetName +
1910 " but got " + str( sizeResponses[ i ] ) )
1911 removeAllResults = removeAllResults and getResults and sizeResults
1912 utilities.assert_equals( expect=main.TRUE,
1913 actual=removeAllResults,
1914 onpass="Set removeAll correct",
1915 onfail="Set removeAll was incorrect" )
1916
1917 main.step( "Distributed Set addAll()" )
1918 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001919 addResponses = main.Cluster.command( "setTestAdd",
1920 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001921 # main.TRUE = successfully changed the set
1922 # main.FALSE = action resulted in no change in set
1923 # main.ERROR - Some error in executing the function
1924 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001925 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001926 if addResponses[ i ] == main.TRUE:
1927 # All is well
1928 pass
1929 elif addResponses[ i ] == main.FALSE:
1930 # Already in set, probably fine
1931 pass
1932 elif addResponses[ i ] == main.ERROR:
1933 # Error in execution
1934 addAllResults = main.FALSE
1935 else:
1936 # unexpected result
1937 addAllResults = main.FALSE
1938 if addAllResults != main.TRUE:
1939 main.log.error( "Error executing set addAll" )
1940
1941 # Check if set is still correct
1942 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001943 getResponses = main.Cluster.command( "setTestGet",
1944 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001945 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001946 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001947 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001948 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001949 current = set( getResponses[ i ] )
1950 if len( current ) == len( getResponses[ i ] ):
1951 # no repeats
1952 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001953 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001954 " of set " + main.onosSetName + ":\n" +
1955 str( getResponses[ i ] ) )
1956 main.log.debug( "Expected: " + str( main.onosSet ) )
1957 main.log.debug( "Actual: " + str( current ) )
1958 getResults = main.FALSE
1959 else:
1960 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001961 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001962 " set " + main.onosSetName + ":\n" +
1963 str( getResponses[ i ] ) )
1964 getResults = main.FALSE
1965 elif getResponses[ i ] == main.ERROR:
1966 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001967 sizeResponses = main.Cluster.command( "setTestSize",
1968 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001969 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001970 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001971 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001972 if size != sizeResponses[ i ]:
1973 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001974 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001975 " for set " + main.onosSetName +
1976 " but got " + str( sizeResponses[ i ] ) )
1977 addAllResults = addAllResults and getResults and sizeResults
1978 utilities.assert_equals( expect=main.TRUE,
1979 actual=addAllResults,
1980 onpass="Set addAll correct",
1981 onfail="Set addAll was incorrect" )
1982
1983 main.step( "Distributed Set clear()" )
1984 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07001985 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07001986 args=[ main.onosSetName, " " ], # Values doesn't matter
1987 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001988 # main.TRUE = successfully changed the set
1989 # main.FALSE = action resulted in no change in set
1990 # main.ERROR - Some error in executing the function
1991 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001992 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001993 if clearResponses[ i ] == main.TRUE:
1994 # All is well
1995 pass
1996 elif clearResponses[ i ] == main.FALSE:
1997 # Nothing set, probably fine
1998 pass
1999 elif clearResponses[ i ] == main.ERROR:
2000 # Error in execution
2001 clearResults = main.FALSE
2002 else:
2003 # unexpected result
2004 clearResults = main.FALSE
2005 if clearResults != main.TRUE:
2006 main.log.error( "Error executing set clear" )
2007
2008 # Check if set is still correct
2009 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002010 getResponses = main.Cluster.command( "setTestGet",
2011 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002012 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002013 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002014 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002015 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002016 current = set( getResponses[ i ] )
2017 if len( current ) == len( getResponses[ i ] ):
2018 # no repeats
2019 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002020 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002021 " of set " + main.onosSetName + ":\n" +
2022 str( getResponses[ i ] ) )
2023 main.log.debug( "Expected: " + str( main.onosSet ) )
2024 main.log.debug( "Actual: " + str( current ) )
2025 getResults = main.FALSE
2026 else:
2027 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002028 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002029 " set " + main.onosSetName + ":\n" +
2030 str( getResponses[ i ] ) )
2031 getResults = main.FALSE
2032 elif getResponses[ i ] == main.ERROR:
2033 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002034 sizeResponses = main.Cluster.command( "setTestSize",
2035 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002036 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002037 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002038 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002039 if size != sizeResponses[ i ]:
2040 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002041 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002042 " for set " + main.onosSetName +
2043 " but got " + str( sizeResponses[ i ] ) )
2044 clearResults = clearResults and getResults and sizeResults
2045 utilities.assert_equals( expect=main.TRUE,
2046 actual=clearResults,
2047 onpass="Set clear correct",
2048 onfail="Set clear was incorrect" )
2049
2050 main.step( "Distributed Set addAll()" )
2051 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002052 addResponses = main.Cluster.command( "setTestAdd",
2053 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002054 # main.TRUE = successfully changed the set
2055 # main.FALSE = action resulted in no change in set
2056 # main.ERROR - Some error in executing the function
2057 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002058 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002059 if addResponses[ i ] == main.TRUE:
2060 # All is well
2061 pass
2062 elif addResponses[ i ] == main.FALSE:
2063 # Already in set, probably fine
2064 pass
2065 elif addResponses[ i ] == main.ERROR:
2066 # Error in execution
2067 addAllResults = main.FALSE
2068 else:
2069 # unexpected result
2070 addAllResults = main.FALSE
2071 if addAllResults != main.TRUE:
2072 main.log.error( "Error executing set addAll" )
2073
2074 # Check if set is still correct
2075 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002076 getResponses = main.Cluster.command( "setTestGet",
2077 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002078 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002079 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002080 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002081 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002082 current = set( getResponses[ i ] )
2083 if len( current ) == len( getResponses[ i ] ):
2084 # no repeats
2085 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002086 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002087 " of set " + main.onosSetName + ":\n" +
2088 str( getResponses[ i ] ) )
2089 main.log.debug( "Expected: " + str( main.onosSet ) )
2090 main.log.debug( "Actual: " + str( current ) )
2091 getResults = main.FALSE
2092 else:
2093 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002094 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002095 " set " + main.onosSetName + ":\n" +
2096 str( getResponses[ i ] ) )
2097 getResults = main.FALSE
2098 elif getResponses[ i ] == main.ERROR:
2099 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002100 sizeResponses = main.Cluster.command( "setTestSize",
2101 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002102 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002103 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002104 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002105 if size != sizeResponses[ i ]:
2106 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002107 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002108 " for set " + main.onosSetName +
2109 " but got " + str( sizeResponses[ i ] ) )
2110 addAllResults = addAllResults and getResults and sizeResults
2111 utilities.assert_equals( expect=main.TRUE,
2112 actual=addAllResults,
2113 onpass="Set addAll correct",
2114 onfail="Set addAll was incorrect" )
2115
2116 main.step( "Distributed Set retain()" )
2117 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002118 retainResponses = main.Cluster.command( "setTestRemove",
2119 args=[ main.onosSetName, retainValue ],
2120 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002121 # main.TRUE = successfully changed the set
2122 # main.FALSE = action resulted in no change in set
2123 # main.ERROR - Some error in executing the function
2124 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002125 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002126 if retainResponses[ i ] == main.TRUE:
2127 # All is well
2128 pass
2129 elif retainResponses[ i ] == main.FALSE:
2130 # Already in set, probably fine
2131 pass
2132 elif retainResponses[ i ] == main.ERROR:
2133 # Error in execution
2134 retainResults = main.FALSE
2135 else:
2136 # unexpected result
2137 retainResults = main.FALSE
2138 if retainResults != main.TRUE:
2139 main.log.error( "Error executing set retain" )
2140
2141 # Check if set is still correct
2142 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002143 getResponses = main.Cluster.command( "setTestGet",
2144 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002145 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002146 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002147 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002148 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002149 current = set( getResponses[ i ] )
2150 if len( current ) == len( getResponses[ i ] ):
2151 # no repeats
2152 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002153 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002154 " of set " + main.onosSetName + ":\n" +
2155 str( getResponses[ i ] ) )
2156 main.log.debug( "Expected: " + str( main.onosSet ) )
2157 main.log.debug( "Actual: " + str( current ) )
2158 getResults = main.FALSE
2159 else:
2160 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002161 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002162 " set " + main.onosSetName + ":\n" +
2163 str( getResponses[ i ] ) )
2164 getResults = main.FALSE
2165 elif getResponses[ i ] == main.ERROR:
2166 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002167 sizeResponses = main.Cluster.command( "setTestSize",
2168 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002169 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002170 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002171 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002172 if size != sizeResponses[ i ]:
2173 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002174 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002175 str( size ) + " for set " + main.onosSetName +
2176 " but got " + str( sizeResponses[ i ] ) )
2177 retainResults = retainResults and getResults and sizeResults
2178 utilities.assert_equals( expect=main.TRUE,
2179 actual=retainResults,
2180 onpass="Set retain correct",
2181 onfail="Set retain was incorrect" )
2182
2183 # Transactional maps
2184 main.step( "Partitioned Transactional maps put" )
2185 tMapValue = "Testing"
2186 numKeys = 100
2187 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002188 ctrl = main.Cluster.next()
2189 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002190 if putResponses and len( putResponses ) == 100:
2191 for i in putResponses:
2192 if putResponses[ i ][ 'value' ] != tMapValue:
2193 putResult = False
2194 else:
2195 putResult = False
2196 if not putResult:
2197 main.log.debug( "Put response values: " + str( putResponses ) )
2198 utilities.assert_equals( expect=True,
2199 actual=putResult,
2200 onpass="Partitioned Transactional Map put successful",
2201 onfail="Partitioned Transactional Map put values are incorrect" )
2202
2203 main.step( "Partitioned Transactional maps get" )
2204 # FIXME: is this sleep needed?
2205 time.sleep( 5 )
2206
2207 getCheck = True
2208 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002209 getResponses = main.Cluster.command( "transactionalMapGet",
2210 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002211 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002212 for node in getResponses:
2213 if node != tMapValue:
2214 valueCheck = False
2215 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002216 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002217 main.log.warn( getResponses )
2218 getCheck = getCheck and valueCheck
2219 utilities.assert_equals( expect=True,
2220 actual=getCheck,
2221 onpass="Partitioned Transactional Map get values were correct",
2222 onfail="Partitioned Transactional Map values incorrect" )
2223
2224 # DISTRIBUTED ATOMIC VALUE
2225 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002226 getValues = main.Cluster.command( "valueTestGet",
2227 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002228 main.log.debug( getValues )
2229 # Check the results
2230 atomicValueGetResult = True
2231 expected = valueValue if valueValue is not None else "null"
2232 main.log.debug( "Checking for value of " + expected )
2233 for i in getValues:
2234 if i != expected:
2235 atomicValueGetResult = False
2236 utilities.assert_equals( expect=True,
2237 actual=atomicValueGetResult,
2238 onpass="Atomic Value get successful",
2239 onfail="Error getting atomic Value " +
2240 str( valueValue ) + ", found: " +
2241 str( getValues ) )
2242
2243 main.step( "Atomic Value set()" )
2244 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002245 setValues = main.Cluster.command( "valueTestSet",
2246 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002247 main.log.debug( setValues )
2248 # Check the results
2249 atomicValueSetResults = True
2250 for i in setValues:
2251 if i != main.TRUE:
2252 atomicValueSetResults = False
2253 utilities.assert_equals( expect=True,
2254 actual=atomicValueSetResults,
2255 onpass="Atomic Value set successful",
2256 onfail="Error setting atomic Value" +
2257 str( setValues ) )
2258
2259 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002260 getValues = main.Cluster.command( "valueTestGet",
2261 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002262 main.log.debug( getValues )
2263 # Check the results
2264 atomicValueGetResult = True
2265 expected = valueValue if valueValue is not None else "null"
2266 main.log.debug( "Checking for value of " + expected )
2267 for i in getValues:
2268 if i != expected:
2269 atomicValueGetResult = False
2270 utilities.assert_equals( expect=True,
2271 actual=atomicValueGetResult,
2272 onpass="Atomic Value get successful",
2273 onfail="Error getting atomic Value " +
2274 str( valueValue ) + ", found: " +
2275 str( getValues ) )
2276
2277 main.step( "Atomic Value compareAndSet()" )
2278 oldValue = valueValue
2279 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002280 ctrl = main.Cluster.next()
2281 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002282 main.log.debug( CASValue )
2283 utilities.assert_equals( expect=main.TRUE,
2284 actual=CASValue,
2285 onpass="Atomic Value comapreAndSet successful",
2286 onfail="Error setting atomic Value:" +
2287 str( CASValue ) )
2288
2289 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002290 getValues = main.Cluster.command( "valueTestGet",
2291 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002292 main.log.debug( getValues )
2293 # Check the results
2294 atomicValueGetResult = True
2295 expected = valueValue if valueValue is not None else "null"
2296 main.log.debug( "Checking for value of " + expected )
2297 for i in getValues:
2298 if i != expected:
2299 atomicValueGetResult = False
2300 utilities.assert_equals( expect=True,
2301 actual=atomicValueGetResult,
2302 onpass="Atomic Value get successful",
2303 onfail="Error getting atomic Value " +
2304 str( valueValue ) + ", found: " +
2305 str( getValues ) )
2306
2307 main.step( "Atomic Value getAndSet()" )
2308 oldValue = valueValue
2309 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002310 ctrl = main.Cluster.next()
2311 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002312 main.log.debug( GASValue )
2313 expected = oldValue if oldValue is not None else "null"
2314 utilities.assert_equals( expect=expected,
2315 actual=GASValue,
2316 onpass="Atomic Value GAS successful",
2317 onfail="Error with GetAndSet atomic Value: expected " +
2318 str( expected ) + ", found: " +
2319 str( GASValue ) )
2320
2321 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002322 getValues = main.Cluster.command( "valueTestGet",
2323 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002324 main.log.debug( getValues )
2325 # Check the results
2326 atomicValueGetResult = True
2327 expected = valueValue if valueValue is not None else "null"
2328 main.log.debug( "Checking for value of " + expected )
2329 for i in getValues:
2330 if i != expected:
2331 atomicValueGetResult = False
2332 utilities.assert_equals( expect=True,
2333 actual=atomicValueGetResult,
2334 onpass="Atomic Value get successful",
2335 onfail="Error getting atomic Value: expected " +
2336 str( valueValue ) + ", found: " +
2337 str( getValues ) )
2338
2339 main.step( "Atomic Value destory()" )
2340 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002341 ctrl = main.Cluster.next()
2342 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002343 main.log.debug( destroyResult )
2344 # Check the results
2345 utilities.assert_equals( expect=main.TRUE,
2346 actual=destroyResult,
2347 onpass="Atomic Value destroy successful",
2348 onfail="Error destroying atomic Value" )
2349
2350 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002351 getValues = main.Cluster.command( "valueTestGet",
2352 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002353 main.log.debug( getValues )
2354 # Check the results
2355 atomicValueGetResult = True
2356 expected = valueValue if valueValue is not None else "null"
2357 main.log.debug( "Checking for value of " + expected )
2358 for i in getValues:
2359 if i != expected:
2360 atomicValueGetResult = False
2361 utilities.assert_equals( expect=True,
2362 actual=atomicValueGetResult,
2363 onpass="Atomic Value get successful",
2364 onfail="Error getting atomic Value " +
2365 str( valueValue ) + ", found: " +
2366 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002367
2368 # WORK QUEUES
2369 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002370 ctrl = main.Cluster.next()
2371 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002372 workQueuePending += 1
2373 main.log.debug( addResult )
2374 # Check the results
2375 utilities.assert_equals( expect=main.TRUE,
2376 actual=addResult,
2377 onpass="Work Queue add successful",
2378 onfail="Error adding to Work Queue" )
2379
2380 main.step( "Check the work queue stats" )
2381 statsResults = self.workQueueStatsCheck( workQueueName,
2382 workQueueCompleted,
2383 workQueueInProgress,
2384 workQueuePending )
2385 utilities.assert_equals( expect=True,
2386 actual=statsResults,
2387 onpass="Work Queue stats correct",
2388 onfail="Work Queue stats incorrect " )
2389
2390 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002391 ctrl = main.Cluster.next()
2392 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002393 workQueuePending += 2
2394 main.log.debug( addMultipleResult )
2395 # Check the results
2396 utilities.assert_equals( expect=main.TRUE,
2397 actual=addMultipleResult,
2398 onpass="Work Queue add multiple successful",
2399 onfail="Error adding multiple items to Work Queue" )
2400
2401 main.step( "Check the work queue stats" )
2402 statsResults = self.workQueueStatsCheck( workQueueName,
2403 workQueueCompleted,
2404 workQueueInProgress,
2405 workQueuePending )
2406 utilities.assert_equals( expect=True,
2407 actual=statsResults,
2408 onpass="Work Queue stats correct",
2409 onfail="Work Queue stats incorrect " )
2410
2411 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002412 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002413 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002414 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002415 workQueuePending -= number
2416 workQueueCompleted += number
2417 main.log.debug( take1Result )
2418 # Check the results
2419 utilities.assert_equals( expect=main.TRUE,
2420 actual=take1Result,
2421 onpass="Work Queue takeAndComplete 1 successful",
2422 onfail="Error taking 1 from Work Queue" )
2423
2424 main.step( "Check the work queue stats" )
2425 statsResults = self.workQueueStatsCheck( workQueueName,
2426 workQueueCompleted,
2427 workQueueInProgress,
2428 workQueuePending )
2429 utilities.assert_equals( expect=True,
2430 actual=statsResults,
2431 onpass="Work Queue stats correct",
2432 onfail="Work Queue stats incorrect " )
2433
2434 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002435 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002436 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002437 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002438 workQueuePending -= number
2439 workQueueCompleted += number
2440 main.log.debug( take2Result )
2441 # Check the results
2442 utilities.assert_equals( expect=main.TRUE,
2443 actual=take2Result,
2444 onpass="Work Queue takeAndComplete 2 successful",
2445 onfail="Error taking 2 from Work Queue" )
2446
2447 main.step( "Check the work queue stats" )
2448 statsResults = self.workQueueStatsCheck( workQueueName,
2449 workQueueCompleted,
2450 workQueueInProgress,
2451 workQueuePending )
2452 utilities.assert_equals( expect=True,
2453 actual=statsResults,
2454 onpass="Work Queue stats correct",
2455 onfail="Work Queue stats incorrect " )
2456
2457 main.step( "Work Queue destroy()" )
2458 valueValue = None
2459 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002460 ctrl = main.Cluster.next()
2461 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002462 workQueueCompleted = 0
2463 workQueueInProgress = 0
2464 workQueuePending = 0
2465 main.log.debug( destroyResult )
2466 # Check the results
2467 utilities.assert_equals( expect=main.TRUE,
2468 actual=destroyResult,
2469 onpass="Work Queue destroy successful",
2470 onfail="Error destroying Work Queue" )
2471
2472 main.step( "Check the work queue stats" )
2473 statsResults = self.workQueueStatsCheck( workQueueName,
2474 workQueueCompleted,
2475 workQueueInProgress,
2476 workQueuePending )
2477 utilities.assert_equals( expect=True,
2478 actual=statsResults,
2479 onpass="Work Queue stats correct",
2480 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002481 except Exception as e:
2482 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002483
2484 def cleanUp( self, main ):
2485 """
2486 Clean up
2487 """
Devin Lim58046fa2017-07-05 16:55:00 -07002488 assert main, "main not defined"
2489 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002490
2491 # printing colors to terminal
2492 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2493 'blue': '\033[94m', 'green': '\033[92m',
2494 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002495
Devin Lim58046fa2017-07-05 16:55:00 -07002496 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002497
2498 main.step( "Checking raft log size" )
2499 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2500 # get compacted periodically
2501 logCheck = main.Cluster.checkPartitionSize()
2502 utilities.assert_equals( expect=True, actual=logCheck,
2503 onpass="Raft log size is not too big",
2504 onfail="Raft logs grew too big" )
2505
Devin Lim58046fa2017-07-05 16:55:00 -07002506 main.step( "Killing tcpdumps" )
2507 main.Mininet2.stopTcpdump()
2508
2509 testname = main.TEST
2510 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2511 main.step( "Copying MN pcap and ONOS log files to test station" )
2512 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2513 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2514 # NOTE: MN Pcap file is being saved to logdir.
2515 # We scp this file as MN and TestON aren't necessarily the same vm
2516
2517 # FIXME: To be replaced with a Jenkin's post script
2518 # TODO: Load these from params
2519 # NOTE: must end in /
2520 logFolder = "/opt/onos/log/"
2521 logFiles = [ "karaf.log", "karaf.log.1" ]
2522 # NOTE: must end in /
2523 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002524 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002525 dstName = main.logdir + "/" + ctrl.name + "-" + f
2526 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002527 logFolder + f, dstName )
2528 # std*.log's
2529 # NOTE: must end in /
2530 logFolder = "/opt/onos/var/"
2531 logFiles = [ "stderr.log", "stdout.log" ]
2532 # NOTE: must end in /
2533 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002534 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002535 dstName = main.logdir + "/" + ctrl.name + "-" + f
2536 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002537 logFolder + f, dstName )
2538 else:
2539 main.log.debug( "skipping saving log files" )
2540
Jon Hall5d5876e2017-11-30 09:33:16 -08002541 main.step( "Checking ONOS Logs for errors" )
2542 for ctrl in main.Cluster.runningNodes:
2543 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2544 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
2545
Devin Lim58046fa2017-07-05 16:55:00 -07002546 main.step( "Stopping Mininet" )
2547 mnResult = main.Mininet1.stopNet()
2548 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2549 onpass="Mininet stopped",
2550 onfail="MN cleanup NOT successful" )
2551
Devin Lim58046fa2017-07-05 16:55:00 -07002552 try:
2553 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2554 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2555 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2556 timerLog.close()
2557 except NameError as e:
2558 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002559
Devin Lim58046fa2017-07-05 16:55:00 -07002560 def assignMastership( self, main ):
2561 """
2562 Assign mastership to controllers
2563 """
2564 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002565 assert main, "main not defined"
2566 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002567
2568 main.case( "Assigning Controller roles for switches" )
2569 main.caseExplanation = "Check that ONOS is connected to each " +\
2570 "device. Then manually assign" +\
2571 " mastership to specific ONOS nodes using" +\
2572 " 'device-role'"
2573 main.step( "Assign mastership of switches to specific controllers" )
2574 # Manually assign mastership to the controller we want
2575 roleCall = main.TRUE
2576
2577 ipList = []
2578 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002579 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002580 try:
2581 # Assign mastership to specific controllers. This assignment was
2582 # determined for a 7 node cluser, but will work with any sized
2583 # cluster
2584 for i in range( 1, 29 ): # switches 1 through 28
2585 # set up correct variables:
2586 if i == 1:
2587 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002588 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002589 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2590 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002591 c = 1 % main.Cluster.numCtrls
2592 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002593 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2594 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002595 c = 1 % main.Cluster.numCtrls
2596 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002597 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2598 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002599 c = 3 % main.Cluster.numCtrls
2600 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002601 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2602 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002603 c = 2 % main.Cluster.numCtrls
2604 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002605 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2606 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002607 c = 2 % main.Cluster.numCtrls
2608 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002609 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2610 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002611 c = 5 % main.Cluster.numCtrls
2612 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002613 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2614 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002615 c = 4 % main.Cluster.numCtrls
2616 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002617 dpid = '3' + str( i ).zfill( 3 )
2618 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2619 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002620 c = 6 % main.Cluster.numCtrls
2621 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002622 dpid = '6' + str( i ).zfill( 3 )
2623 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2624 elif i == 28:
2625 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002626 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002627 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2628 else:
2629 main.log.error( "You didn't write an else statement for " +
2630 "switch s" + str( i ) )
2631 roleCall = main.FALSE
2632 # Assign switch
2633 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2634 # TODO: make this controller dynamic
2635 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2636 ipList.append( ip )
2637 deviceList.append( deviceId )
2638 except ( AttributeError, AssertionError ):
2639 main.log.exception( "Something is wrong with ONOS device view" )
2640 main.log.info( onosCli.devices() )
2641 utilities.assert_equals(
2642 expect=main.TRUE,
2643 actual=roleCall,
2644 onpass="Re-assigned switch mastership to designated controller",
2645 onfail="Something wrong with deviceRole calls" )
2646
2647 main.step( "Check mastership was correctly assigned" )
2648 roleCheck = main.TRUE
2649 # NOTE: This is due to the fact that device mastership change is not
2650 # atomic and is actually a multi step process
2651 time.sleep( 5 )
2652 for i in range( len( ipList ) ):
2653 ip = ipList[ i ]
2654 deviceId = deviceList[ i ]
2655 # Check assignment
2656 master = onosCli.getRole( deviceId ).get( 'master' )
2657 if ip in master:
2658 roleCheck = roleCheck and main.TRUE
2659 else:
2660 roleCheck = roleCheck and main.FALSE
2661 main.log.error( "Error, controller " + ip + " is not" +
2662 " master " + "of device " +
2663 str( deviceId ) + ". Master is " +
2664 repr( master ) + "." )
2665 utilities.assert_equals(
2666 expect=main.TRUE,
2667 actual=roleCheck,
2668 onpass="Switches were successfully reassigned to designated " +
2669 "controller",
2670 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002671
Jon Hall5d5876e2017-11-30 09:33:16 -08002672 def bringUpStoppedNodes( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -07002673 """
Jon Hall5d5876e2017-11-30 09:33:16 -08002674 The bring up stopped nodes.
Devin Lim58046fa2017-07-05 16:55:00 -07002675 """
2676 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002677 assert main, "main not defined"
2678 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002679 assert main.kill, "main.kill not defined"
2680 main.case( "Restart minority of ONOS nodes" )
2681
2682 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2683 startResults = main.TRUE
2684 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002685 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002686 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002687 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002688 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2689 onpass="ONOS nodes started successfully",
2690 onfail="ONOS nodes NOT successfully started" )
2691
2692 main.step( "Checking if ONOS is up yet" )
2693 count = 0
2694 onosIsupResult = main.FALSE
2695 while onosIsupResult == main.FALSE and count < 10:
2696 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002697 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002698 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002699 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002700 count = count + 1
2701 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2702 onpass="ONOS restarted successfully",
2703 onfail="ONOS restart NOT successful" )
2704
Jon Hall5d5876e2017-11-30 09:33:16 -08002705 main.step( "Restarting ONOS CLI" )
Devin Lim58046fa2017-07-05 16:55:00 -07002706 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002707 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002708 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002709 ctrl.startOnosCli( ctrl.ipAddress )
2710 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002711 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002712 onpass="ONOS node(s) restarted",
2713 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002714
Jon Hall5d5876e2017-11-30 09:33:16 -08002715 # Grab the time of restart so we can have some idea of average time
Devin Lim58046fa2017-07-05 16:55:00 -07002716 main.restartTime = time.time() - restartTime
2717 main.log.debug( "Restart time: " + str( main.restartTime ) )
2718 # TODO: MAke this configurable. Also, we are breaking the above timer
2719 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08002720 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07002721 False,
Devin Lim58046fa2017-07-05 16:55:00 -07002722 sleep=15,
2723 attempts=5 )
2724
2725 utilities.assert_equals( expect=True, actual=nodeResults,
2726 onpass="Nodes check successful",
2727 onfail="Nodes check NOT successful" )
2728
2729 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002730 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002731 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002732 ctrl.name,
2733 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002734 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002735 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002736
Jon Hallca319892017-06-15 15:25:22 -07002737 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002738
2739 main.step( "Rerun for election on the node(s) that were killed" )
2740 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002741 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002742 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002743 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002744 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2745 onpass="ONOS nodes reran for election topic",
Jon Hall5d5876e2017-11-30 09:33:16 -08002746 onfail="Error rerunning for election" )
2747
2748 def upgradeNodes( self, main ):
2749 """
2750 Reinstall some nodes with an upgraded version.
2751
2752 This will reinstall nodes in main.kill with an upgraded version.
2753 """
2754 import time
2755 assert main, "main not defined"
2756 assert utilities.assert_equals, "utilities.assert_equals not defined"
2757 assert main.kill, "main.kill not defined"
2758 nodeNames = [ node.name for node in main.kill ]
2759 main.step( "Upgrading" + str( nodeNames ) + " ONOS nodes" )
2760
2761 stopResults = main.TRUE
2762 uninstallResults = main.TRUE
2763 startResults = main.TRUE
2764 sshResults = main.TRUE
2765 isup = main.TRUE
2766 restartTime = time.time()
2767 for ctrl in main.kill:
2768 stopResults = stopResults and\
2769 ctrl.onosStop( ctrl.ipAddress )
2770 uninstallResults = uninstallResults and\
2771 ctrl.onosUninstall( ctrl.ipAddress )
2772 # Install the new version of onos
2773 startResults = startResults and\
2774 ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
2775 sshResults = sshResults and\
2776 ctrl.onosSecureSSH( node=ctrl.ipAddress )
2777 isup = isup and ctrl.isup( ctrl.ipAddress )
2778 utilities.assert_equals( expect=main.TRUE, actual=stopResults,
2779 onpass="ONOS nodes stopped successfully",
2780 onfail="ONOS nodes NOT successfully stopped" )
2781 utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
2782 onpass="ONOS nodes uninstalled successfully",
2783 onfail="ONOS nodes NOT successfully uninstalled" )
2784 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2785 onpass="ONOS nodes started successfully",
2786 onfail="ONOS nodes NOT successfully started" )
2787 utilities.assert_equals( expect=main.TRUE, actual=sshResults,
2788 onpass="Successfully secured onos ssh",
2789 onfail="Failed to secure onos ssh" )
2790 utilities.assert_equals( expect=main.TRUE, actual=isup,
2791 onpass="ONOS nodes fully started",
2792 onfail="ONOS nodes NOT fully started" )
2793
2794 main.step( "Restarting ONOS CLI" )
2795 cliResults = main.TRUE
2796 for ctrl in main.kill:
2797 cliResults = cliResults and\
2798 ctrl.startOnosCli( ctrl.ipAddress )
2799 ctrl.active = True
2800 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
2801 onpass="ONOS node(s) restarted",
2802 onfail="ONOS node(s) did not restart" )
2803
2804 # Grab the time of restart so we can have some idea of average time
2805 main.restartTime = time.time() - restartTime
2806 main.log.debug( "Restart time: " + str( main.restartTime ) )
2807 # TODO: Make this configurable.
2808 main.step( "Checking ONOS nodes" )
2809 nodeResults = utilities.retry( main.Cluster.nodesCheck,
2810 False,
2811 sleep=15,
2812 attempts=5 )
2813
2814 utilities.assert_equals( expect=True, actual=nodeResults,
2815 onpass="Nodes check successful",
2816 onfail="Nodes check NOT successful" )
2817
2818 if not nodeResults:
2819 for ctrl in main.Cluster.active():
2820 main.log.debug( "{} components not ACTIVE: \n{}".format(
2821 ctrl.name,
2822 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
2823 main.log.error( "Failed to start ONOS, stopping test" )
2824 main.cleanAndExit()
2825
2826 self.commonChecks()
2827
2828 main.step( "Rerun for election on the node(s) that were killed" )
2829 runResults = main.TRUE
2830 for ctrl in main.kill:
2831 runResults = runResults and\
2832 ctrl.electionTestRun()
2833 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2834 onpass="ONOS nodes reran for election topic",
2835 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002836
Devin Lim142b5342017-07-20 15:22:39 -07002837 def tempCell( self, cellName, ipList ):
2838 main.step( "Create cell file" )
2839 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002840
Devin Lim142b5342017-07-20 15:22:39 -07002841 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2842 main.Mininet1.ip_address,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002843 cellAppString, ipList, main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002844 main.step( "Applying cell variable to environment" )
2845 cellResult = main.ONOSbench.setCell( cellName )
2846 verifyResult = main.ONOSbench.verifyCell()
2847
Devin Lim142b5342017-07-20 15:22:39 -07002848 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002849 """
2850 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002851 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002852 1: scaling
2853 """
2854 """
2855 Check state after ONOS failure/scaling
2856 """
2857 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002858 assert main, "main not defined"
2859 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002860 main.case( "Running ONOS Constant State Tests" )
2861
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002862 OnosAfterWhich = [ "failure", "scaliing" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002863
Devin Lim58046fa2017-07-05 16:55:00 -07002864 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002865 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002866
Devin Lim142b5342017-07-20 15:22:39 -07002867 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002868 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002869
2870 if rolesResults and not consistentMastership:
2871 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002872 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002873 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002874 json.dumps( json.loads( ONOSMastership[ i ] ),
2875 sort_keys=True,
2876 indent=4,
2877 separators=( ',', ': ' ) ) )
2878
2879 if compareSwitch:
2880 description2 = "Compare switch roles from before failure"
2881 main.step( description2 )
2882 try:
2883 currentJson = json.loads( ONOSMastership[ 0 ] )
2884 oldJson = json.loads( mastershipState )
2885 except ( ValueError, TypeError ):
2886 main.log.exception( "Something is wrong with parsing " +
2887 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002888 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2889 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002890 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002891 mastershipCheck = main.TRUE
2892 for i in range( 1, 29 ):
2893 switchDPID = str(
2894 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2895 current = [ switch[ 'master' ] for switch in currentJson
2896 if switchDPID in switch[ 'id' ] ]
2897 old = [ switch[ 'master' ] for switch in oldJson
2898 if switchDPID in switch[ 'id' ] ]
2899 if current == old:
2900 mastershipCheck = mastershipCheck and main.TRUE
2901 else:
2902 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2903 mastershipCheck = main.FALSE
2904 utilities.assert_equals(
2905 expect=main.TRUE,
2906 actual=mastershipCheck,
2907 onpass="Mastership of Switches was not changed",
2908 onfail="Mastership of some switches changed" )
2909
2910 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002911 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002912 intentCheck = main.FALSE
2913 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002914
2915 main.step( "Check for consistency in Intents from each controller" )
2916 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2917 main.log.info( "Intents are consistent across all ONOS " +
2918 "nodes" )
2919 else:
2920 consistentIntents = False
2921
2922 # Try to make it easy to figure out what is happening
2923 #
2924 # Intent ONOS1 ONOS2 ...
2925 # 0x01 INSTALLED INSTALLING
2926 # ... ... ...
2927 # ... ... ...
2928 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002929 for ctrl in main.Cluster.active():
2930 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002931 main.log.warn( title )
2932 # get all intent keys in the cluster
2933 keys = []
2934 for nodeStr in ONOSIntents:
2935 node = json.loads( nodeStr )
2936 for intent in node:
2937 keys.append( intent.get( 'id' ) )
2938 keys = set( keys )
2939 for key in keys:
2940 row = "%-13s" % key
2941 for nodeStr in ONOSIntents:
2942 node = json.loads( nodeStr )
2943 for intent in node:
2944 if intent.get( 'id' ) == key:
2945 row += "%-15s" % intent.get( 'state' )
2946 main.log.warn( row )
2947 # End table view
2948
2949 utilities.assert_equals(
2950 expect=True,
2951 actual=consistentIntents,
2952 onpass="Intents are consistent across all ONOS nodes",
2953 onfail="ONOS nodes have different views of intents" )
2954 intentStates = []
2955 for node in ONOSIntents: # Iter through ONOS nodes
2956 nodeStates = []
2957 # Iter through intents of a node
2958 try:
2959 for intent in json.loads( node ):
2960 nodeStates.append( intent[ 'state' ] )
2961 except ( ValueError, TypeError ):
2962 main.log.exception( "Error in parsing intents" )
2963 main.log.error( repr( node ) )
2964 intentStates.append( nodeStates )
2965 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2966 main.log.info( dict( out ) )
2967
2968 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002969 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002970 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002971 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002972 main.log.warn( json.dumps(
2973 json.loads( ONOSIntents[ i ] ),
2974 sort_keys=True,
2975 indent=4,
2976 separators=( ',', ': ' ) ) )
2977 elif intentsResults and consistentIntents:
2978 intentCheck = main.TRUE
2979
2980 # NOTE: Store has no durability, so intents are lost across system
2981 # restarts
2982 if not isRestart:
2983 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2984 # NOTE: this requires case 5 to pass for intentState to be set.
2985 # maybe we should stop the test if that fails?
2986 sameIntents = main.FALSE
2987 try:
2988 intentState
2989 except NameError:
2990 main.log.warn( "No previous intent state was saved" )
2991 else:
2992 if intentState and intentState == ONOSIntents[ 0 ]:
2993 sameIntents = main.TRUE
2994 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2995 # TODO: possibly the states have changed? we may need to figure out
2996 # what the acceptable states are
2997 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2998 sameIntents = main.TRUE
2999 try:
3000 before = json.loads( intentState )
3001 after = json.loads( ONOSIntents[ 0 ] )
3002 for intent in before:
3003 if intent not in after:
3004 sameIntents = main.FALSE
3005 main.log.debug( "Intent is not currently in ONOS " +
3006 "(at least in the same form):" )
3007 main.log.debug( json.dumps( intent ) )
3008 except ( ValueError, TypeError ):
3009 main.log.exception( "Exception printing intents" )
3010 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3011 main.log.debug( repr( intentState ) )
3012 if sameIntents == main.FALSE:
3013 try:
3014 main.log.debug( "ONOS intents before: " )
3015 main.log.debug( json.dumps( json.loads( intentState ),
3016 sort_keys=True, indent=4,
3017 separators=( ',', ': ' ) ) )
3018 main.log.debug( "Current ONOS intents: " )
3019 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
3020 sort_keys=True, indent=4,
3021 separators=( ',', ': ' ) ) )
3022 except ( ValueError, TypeError ):
3023 main.log.exception( "Exception printing intents" )
3024 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3025 main.log.debug( repr( intentState ) )
3026 utilities.assert_equals(
3027 expect=main.TRUE,
3028 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003029 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07003030 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
3031 intentCheck = intentCheck and sameIntents
3032
3033 main.step( "Get the OF Table entries and compare to before " +
3034 "component " + OnosAfterWhich[ afterWhich ] )
3035 FlowTables = main.TRUE
3036 for i in range( 28 ):
3037 main.log.info( "Checking flow table on s" + str( i + 1 ) )
3038 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
3039 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
3040 FlowTables = FlowTables and curSwitch
3041 if curSwitch == main.FALSE:
3042 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
3043 utilities.assert_equals(
3044 expect=main.TRUE,
3045 actual=FlowTables,
3046 onpass="No changes were found in the flow tables",
3047 onfail="Changes were found in the flow tables" )
3048
Jon Hallca319892017-06-15 15:25:22 -07003049 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003050 """
3051 main.step( "Check the continuous pings to ensure that no packets " +
3052 "were dropped during component failure" )
3053 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3054 main.params[ 'TESTONIP' ] )
3055 LossInPings = main.FALSE
3056 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3057 for i in range( 8, 18 ):
3058 main.log.info(
3059 "Checking for a loss in pings along flow from s" +
3060 str( i ) )
3061 LossInPings = main.Mininet2.checkForLoss(
3062 "/tmp/ping.h" +
3063 str( i ) ) or LossInPings
3064 if LossInPings == main.TRUE:
3065 main.log.info( "Loss in ping detected" )
3066 elif LossInPings == main.ERROR:
3067 main.log.info( "There are multiple mininet process running" )
3068 elif LossInPings == main.FALSE:
3069 main.log.info( "No Loss in the pings" )
3070 main.log.info( "No loss of dataplane connectivity" )
3071 utilities.assert_equals(
3072 expect=main.FALSE,
3073 actual=LossInPings,
3074 onpass="No Loss of connectivity",
3075 onfail="Loss of dataplane connectivity detected" )
3076 # NOTE: Since intents are not persisted with IntnentStore,
3077 # we expect loss in dataplane connectivity
3078 LossInPings = main.FALSE
3079 """
Devin Lim58046fa2017-07-05 16:55:00 -07003080 def compareTopo( self, main ):
3081 """
3082 Compare topo
3083 """
3084 import json
3085 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003086 assert main, "main not defined"
3087 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003088 try:
3089 from tests.dependencies.topology import Topology
3090 except ImportError:
3091 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003092 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003093 try:
3094 main.topoRelated
3095 except ( NameError, AttributeError ):
3096 main.topoRelated = Topology()
3097 main.case( "Compare ONOS Topology view to Mininet topology" )
3098 main.caseExplanation = "Compare topology objects between Mininet" +\
3099 " and ONOS"
3100 topoResult = main.FALSE
3101 topoFailMsg = "ONOS topology don't match Mininet"
3102 elapsed = 0
3103 count = 0
3104 main.step( "Comparing ONOS topology to MN topology" )
3105 startTime = time.time()
3106 # Give time for Gossip to work
3107 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3108 devicesResults = main.TRUE
3109 linksResults = main.TRUE
3110 hostsResults = main.TRUE
3111 hostAttachmentResults = True
3112 count += 1
3113 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003114 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003115 kwargs={ 'sleep': 5, 'attempts': 5,
3116 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003117 ipResult = main.TRUE
3118
Devin Lim142b5342017-07-20 15:22:39 -07003119 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003120 kwargs={ 'sleep': 5, 'attempts': 5,
3121 'randomTime': True },
3122 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003123
3124 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003125 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003126 if hosts[ controller ]:
3127 for host in hosts[ controller ]:
3128 if host is None or host.get( 'ipAddresses', [] ) == []:
3129 main.log.error(
3130 "Error with host ipAddresses on controller" +
3131 controllerStr + ": " + str( host ) )
3132 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003133 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003134 kwargs={ 'sleep': 5, 'attempts': 5,
3135 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003136 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003137 kwargs={ 'sleep': 5, 'attempts': 5,
3138 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003139 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003140 kwargs={ 'sleep': 5, 'attempts': 5,
3141 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003142
3143 elapsed = time.time() - startTime
3144 cliTime = time.time() - cliStart
Jon Hall5d5876e2017-11-30 09:33:16 -08003145 main.log.debug( "Elapsed time: " + str( elapsed ) )
3146 main.log.debug( "CLI time: " + str( cliTime ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003147
3148 if all( e is None for e in devices ) and\
3149 all( e is None for e in hosts ) and\
3150 all( e is None for e in ports ) and\
3151 all( e is None for e in links ) and\
3152 all( e is None for e in clusters ):
3153 topoFailMsg = "Could not get topology from ONOS"
3154 main.log.error( topoFailMsg )
3155 continue # Try again, No use trying to compare
3156
3157 mnSwitches = main.Mininet1.getSwitches()
3158 mnLinks = main.Mininet1.getLinks()
3159 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003160 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003161 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003162 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3163 controller,
3164 mnSwitches,
3165 devices,
3166 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003167 utilities.assert_equals( expect=main.TRUE,
3168 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003169 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003170 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003171 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003172 " Switches view is incorrect" )
3173
Devin Lim58046fa2017-07-05 16:55:00 -07003174 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003175 main.Mininet1.compareLinks,
3176 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003177 utilities.assert_equals( expect=main.TRUE,
3178 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003179 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003180 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003181 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003182 " links view is incorrect" )
3183 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3184 currentHostsResult = main.Mininet1.compareHosts(
3185 mnHosts,
3186 hosts[ controller ] )
3187 elif hosts[ controller ] == []:
3188 currentHostsResult = main.TRUE
3189 else:
3190 currentHostsResult = main.FALSE
3191 utilities.assert_equals( expect=main.TRUE,
3192 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003193 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003194 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003195 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003196 " hosts don't match Mininet" )
3197 # CHECKING HOST ATTACHMENT POINTS
3198 hostAttachment = True
3199 zeroHosts = False
3200 # FIXME: topo-HA/obelisk specific mappings:
3201 # key is mac and value is dpid
3202 mappings = {}
3203 for i in range( 1, 29 ): # hosts 1 through 28
3204 # set up correct variables:
3205 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3206 if i == 1:
3207 deviceId = "1000".zfill( 16 )
3208 elif i == 2:
3209 deviceId = "2000".zfill( 16 )
3210 elif i == 3:
3211 deviceId = "3000".zfill( 16 )
3212 elif i == 4:
3213 deviceId = "3004".zfill( 16 )
3214 elif i == 5:
3215 deviceId = "5000".zfill( 16 )
3216 elif i == 6:
3217 deviceId = "6000".zfill( 16 )
3218 elif i == 7:
3219 deviceId = "6007".zfill( 16 )
3220 elif i >= 8 and i <= 17:
3221 dpid = '3' + str( i ).zfill( 3 )
3222 deviceId = dpid.zfill( 16 )
3223 elif i >= 18 and i <= 27:
3224 dpid = '6' + str( i ).zfill( 3 )
3225 deviceId = dpid.zfill( 16 )
3226 elif i == 28:
3227 deviceId = "2800".zfill( 16 )
3228 mappings[ macId ] = deviceId
3229 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3230 if hosts[ controller ] == []:
3231 main.log.warn( "There are no hosts discovered" )
3232 zeroHosts = True
3233 else:
3234 for host in hosts[ controller ]:
3235 mac = None
3236 location = None
3237 device = None
3238 port = None
3239 try:
3240 mac = host.get( 'mac' )
3241 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003242 print host
3243 if 'locations' in host:
3244 location = host.get( 'locations' )[ 0 ]
3245 elif 'location' in host:
3246 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003247 assert location, "location field could not be found for this host object"
3248
3249 # Trim the protocol identifier off deviceId
3250 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3251 assert device, "elementId field could not be found for this host location object"
3252
3253 port = location.get( 'port' )
3254 assert port, "port field could not be found for this host location object"
3255
3256 # Now check if this matches where they should be
3257 if mac and device and port:
3258 if str( port ) != "1":
3259 main.log.error( "The attachment port is incorrect for " +
3260 "host " + str( mac ) +
3261 ". Expected: 1 Actual: " + str( port ) )
3262 hostAttachment = False
3263 if device != mappings[ str( mac ) ]:
3264 main.log.error( "The attachment device is incorrect for " +
3265 "host " + str( mac ) +
3266 ". Expected: " + mappings[ str( mac ) ] +
3267 " Actual: " + device )
3268 hostAttachment = False
3269 else:
3270 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003271 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003272 main.log.exception( "Json object not as expected" )
3273 main.log.error( repr( host ) )
3274 hostAttachment = False
3275 else:
3276 main.log.error( "No hosts json output or \"Error\"" +
3277 " in output. hosts = " +
3278 repr( hosts[ controller ] ) )
3279 if zeroHosts is False:
3280 # TODO: Find a way to know if there should be hosts in a
3281 # given point of the test
3282 hostAttachment = True
3283
3284 # END CHECKING HOST ATTACHMENT POINTS
3285 devicesResults = devicesResults and currentDevicesResult
3286 linksResults = linksResults and currentLinksResult
3287 hostsResults = hostsResults and currentHostsResult
3288 hostAttachmentResults = hostAttachmentResults and\
3289 hostAttachment
3290 topoResult = ( devicesResults and linksResults
3291 and hostsResults and ipResult and
3292 hostAttachmentResults )
3293 utilities.assert_equals( expect=True,
3294 actual=topoResult,
3295 onpass="ONOS topology matches Mininet",
3296 onfail=topoFailMsg )
3297 # End of While loop to pull ONOS state
3298
3299 # Compare json objects for hosts and dataplane clusters
3300
3301 # hosts
3302 main.step( "Hosts view is consistent across all ONOS nodes" )
3303 consistentHostsResult = main.TRUE
3304 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003305 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003306 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3307 if hosts[ controller ] == hosts[ 0 ]:
3308 continue
3309 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003310 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003311 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003312 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003313 consistentHostsResult = main.FALSE
3314
3315 else:
Jon Hallca319892017-06-15 15:25:22 -07003316 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003317 controllerStr )
3318 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003319 main.log.debug( controllerStr +
3320 " hosts response: " +
3321 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003322 utilities.assert_equals(
3323 expect=main.TRUE,
3324 actual=consistentHostsResult,
3325 onpass="Hosts view is consistent across all ONOS nodes",
3326 onfail="ONOS nodes have different views of hosts" )
3327
3328 main.step( "Hosts information is correct" )
3329 hostsResults = hostsResults and ipResult
3330 utilities.assert_equals(
3331 expect=main.TRUE,
3332 actual=hostsResults,
3333 onpass="Host information is correct",
3334 onfail="Host information is incorrect" )
3335
3336 main.step( "Host attachment points to the network" )
3337 utilities.assert_equals(
3338 expect=True,
3339 actual=hostAttachmentResults,
3340 onpass="Hosts are correctly attached to the network",
3341 onfail="ONOS did not correctly attach hosts to the network" )
3342
3343 # Strongly connected clusters of devices
3344 main.step( "Clusters view is consistent across all ONOS nodes" )
3345 consistentClustersResult = main.TRUE
3346 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003347 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003348 if "Error" not in clusters[ controller ]:
3349 if clusters[ controller ] == clusters[ 0 ]:
3350 continue
3351 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003352 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003353 controllerStr +
3354 " is inconsistent with ONOS1" )
3355 consistentClustersResult = main.FALSE
3356 else:
3357 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003358 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003359 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003360 main.log.debug( controllerStr +
3361 " clusters response: " +
3362 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003363 utilities.assert_equals(
3364 expect=main.TRUE,
3365 actual=consistentClustersResult,
3366 onpass="Clusters view is consistent across all ONOS nodes",
3367 onfail="ONOS nodes have different views of clusters" )
3368 if not consistentClustersResult:
3369 main.log.debug( clusters )
3370 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003371 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003372
3373 main.step( "There is only one SCC" )
3374 # there should always only be one cluster
3375 try:
3376 numClusters = len( json.loads( clusters[ 0 ] ) )
3377 except ( ValueError, TypeError ):
3378 main.log.exception( "Error parsing clusters[0]: " +
3379 repr( clusters[ 0 ] ) )
3380 numClusters = "ERROR"
3381 clusterResults = main.FALSE
3382 if numClusters == 1:
3383 clusterResults = main.TRUE
3384 utilities.assert_equals(
3385 expect=1,
3386 actual=numClusters,
3387 onpass="ONOS shows 1 SCC",
3388 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3389
3390 topoResult = ( devicesResults and linksResults
3391 and hostsResults and consistentHostsResult
3392 and consistentClustersResult and clusterResults
3393 and ipResult and hostAttachmentResults )
3394
3395 topoResult = topoResult and int( count <= 2 )
3396 note = "note it takes about " + str( int( cliTime ) ) + \
3397 " seconds for the test to make all the cli calls to fetch " +\
3398 "the topology from each ONOS instance"
3399 main.log.info(
3400 "Very crass estimate for topology discovery/convergence( " +
3401 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3402 str( count ) + " tries" )
3403
3404 main.step( "Device information is correct" )
3405 utilities.assert_equals(
3406 expect=main.TRUE,
3407 actual=devicesResults,
3408 onpass="Device information is correct",
3409 onfail="Device information is incorrect" )
3410
3411 main.step( "Links are correct" )
3412 utilities.assert_equals(
3413 expect=main.TRUE,
3414 actual=linksResults,
3415 onpass="Link are correct",
3416 onfail="Links are incorrect" )
3417
3418 main.step( "Hosts are correct" )
3419 utilities.assert_equals(
3420 expect=main.TRUE,
3421 actual=hostsResults,
3422 onpass="Hosts are correct",
3423 onfail="Hosts are incorrect" )
3424
3425 # FIXME: move this to an ONOS state case
3426 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003427 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003428 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003429 attempts=5 )
3430 utilities.assert_equals( expect=True, actual=nodeResults,
3431 onpass="Nodes check successful",
3432 onfail="Nodes check NOT successful" )
3433 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003434 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003435 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003436 ctrl.name,
3437 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003438
3439 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003440 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003441
Devin Lim58046fa2017-07-05 16:55:00 -07003442 def linkDown( self, main, fromS="s3", toS="s28" ):
3443 """
3444 Link fromS-toS down
3445 """
3446 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003447 assert main, "main not defined"
3448 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003449 # NOTE: You should probably run a topology check after this
3450
3451 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3452
3453 description = "Turn off a link to ensure that Link Discovery " +\
3454 "is working properly"
3455 main.case( description )
3456
3457 main.step( "Kill Link between " + fromS + " and " + toS )
3458 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3459 main.log.info( "Waiting " + str( linkSleep ) +
3460 " seconds for link down to be discovered" )
3461 time.sleep( linkSleep )
3462 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3463 onpass="Link down successful",
3464 onfail="Failed to bring link down" )
3465 # TODO do some sort of check here
3466
3467 def linkUp( self, main, fromS="s3", toS="s28" ):
3468 """
3469 Link fromS-toS up
3470 """
3471 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003472 assert main, "main not defined"
3473 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003474 # NOTE: You should probably run a topology check after this
3475
3476 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3477
3478 description = "Restore a link to ensure that Link Discovery is " + \
3479 "working properly"
3480 main.case( description )
3481
Jon Hall4173b242017-09-12 17:04:38 -07003482 main.step( "Bring link between " + fromS + " and " + toS + " back up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003483 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3484 main.log.info( "Waiting " + str( linkSleep ) +
3485 " seconds for link up to be discovered" )
3486 time.sleep( linkSleep )
3487 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3488 onpass="Link up successful",
3489 onfail="Failed to bring link up" )
3490
3491 def switchDown( self, main ):
3492 """
3493 Switch Down
3494 """
3495 # NOTE: You should probably run a topology check after this
3496 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003497 assert main, "main not defined"
3498 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003499
3500 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3501
3502 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003503 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003504 main.case( description )
3505 switch = main.params[ 'kill' ][ 'switch' ]
3506 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3507
3508 # TODO: Make this switch parameterizable
3509 main.step( "Kill " + switch )
3510 main.log.info( "Deleting " + switch )
3511 main.Mininet1.delSwitch( switch )
3512 main.log.info( "Waiting " + str( switchSleep ) +
3513 " seconds for switch down to be discovered" )
3514 time.sleep( switchSleep )
3515 device = onosCli.getDevice( dpid=switchDPID )
3516 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003517 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003518 result = main.FALSE
3519 if device and device[ 'available' ] is False:
3520 result = main.TRUE
3521 utilities.assert_equals( expect=main.TRUE, actual=result,
3522 onpass="Kill switch successful",
3523 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003524
Devin Lim58046fa2017-07-05 16:55:00 -07003525 def switchUp( self, main ):
3526 """
3527 Switch Up
3528 """
3529 # NOTE: You should probably run a topology check after this
3530 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003531 assert main, "main not defined"
3532 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003533
3534 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3535 switch = main.params[ 'kill' ][ 'switch' ]
3536 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3537 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003538 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003539 description = "Adding a switch to ensure it is discovered correctly"
3540 main.case( description )
3541
3542 main.step( "Add back " + switch )
3543 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3544 for peer in links:
3545 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003546 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003547 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3548 main.log.info( "Waiting " + str( switchSleep ) +
3549 " seconds for switch up to be discovered" )
3550 time.sleep( switchSleep )
3551 device = onosCli.getDevice( dpid=switchDPID )
3552 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003553 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003554 result = main.FALSE
3555 if device and device[ 'available' ]:
3556 result = main.TRUE
3557 utilities.assert_equals( expect=main.TRUE, actual=result,
3558 onpass="add switch successful",
3559 onfail="Failed to add switch?" )
3560
3561 def startElectionApp( self, main ):
3562 """
3563 start election app on all onos nodes
3564 """
Devin Lim58046fa2017-07-05 16:55:00 -07003565 assert main, "main not defined"
3566 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003567
3568 main.case( "Start Leadership Election app" )
3569 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003570 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003571 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003572 utilities.assert_equals(
3573 expect=main.TRUE,
3574 actual=appResult,
3575 onpass="Election app installed",
3576 onfail="Something went wrong with installing Leadership election" )
3577
3578 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003579 onosCli.electionTestRun()
3580 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003581 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003582 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003583 utilities.assert_equals(
3584 expect=True,
3585 actual=sameResult,
3586 onpass="All nodes see the same leaderboards",
3587 onfail="Inconsistent leaderboards" )
3588
3589 if sameResult:
Jon Hall5d5876e2017-11-30 09:33:16 -08003590 # Check that the leader is one of the active nodes
3591 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003592 leader = leaders[ 0 ][ 0 ]
Jon Hall5d5876e2017-11-30 09:33:16 -08003593 if leader in ips:
3594 legitimate = True
Devin Lim58046fa2017-07-05 16:55:00 -07003595 else:
Jon Hall5d5876e2017-11-30 09:33:16 -08003596 legitimate = False
3597 main.log.debug( leaders )
3598 main.step( "Active node was elected leader?" )
Devin Lim58046fa2017-07-05 16:55:00 -07003599 utilities.assert_equals(
3600 expect=True,
Jon Hall5d5876e2017-11-30 09:33:16 -08003601 actual=legitimate,
Devin Lim58046fa2017-07-05 16:55:00 -07003602 onpass="Correct leader was elected",
3603 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003604 main.Cluster.testLeader = leader
3605
Devin Lim58046fa2017-07-05 16:55:00 -07003606 def isElectionFunctional( self, main ):
3607 """
3608 Check that Leadership Election is still functional
3609 15.1 Run election on each node
3610 15.2 Check that each node has the same leaders and candidates
3611 15.3 Find current leader and withdraw
3612 15.4 Check that a new node was elected leader
3613 15.5 Check that that new leader was the candidate of old leader
3614 15.6 Run for election on old leader
3615 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3616 15.8 Make sure that the old leader was added to the candidate list
3617
3618 old and new variable prefixes refer to data from before vs after
3619 withdrawl and later before withdrawl vs after re-election
3620 """
3621 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003622 assert main, "main not defined"
3623 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003624
3625 description = "Check that Leadership Election is still functional"
3626 main.case( description )
3627 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3628
3629 oldLeaders = [] # list of lists of each nodes' candidates before
3630 newLeaders = [] # list of lists of each nodes' candidates after
3631 oldLeader = '' # the old leader from oldLeaders, None if not same
3632 newLeader = '' # the new leaders fron newLoeaders, None if not same
3633 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3634 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003635 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003636 expectNoLeader = True
3637
3638 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003639 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003640 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003641 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003642 actual=electionResult,
3643 onpass="All nodes successfully ran for leadership",
3644 onfail="At least one node failed to run for leadership" )
3645
3646 if electionResult == main.FALSE:
3647 main.log.error(
3648 "Skipping Test Case because Election Test App isn't loaded" )
3649 main.skipCase()
3650
3651 main.step( "Check that each node shows the same leader and candidates" )
3652 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003653 activeCLIs = main.Cluster.active()
3654 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003655 if sameResult:
3656 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003657 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003658 else:
3659 oldLeader = None
3660 utilities.assert_equals(
3661 expect=True,
3662 actual=sameResult,
3663 onpass="Leaderboards are consistent for the election topic",
3664 onfail=failMessage )
3665
3666 main.step( "Find current leader and withdraw" )
3667 withdrawResult = main.TRUE
3668 # do some sanity checking on leader before using it
3669 if oldLeader is None:
3670 main.log.error( "Leadership isn't consistent." )
3671 withdrawResult = main.FALSE
3672 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003673 for ctrl in main.Cluster.active():
3674 if oldLeader == ctrl.ipAddress:
3675 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003676 break
3677 else: # FOR/ELSE statement
3678 main.log.error( "Leader election, could not find current leader" )
3679 if oldLeader:
3680 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3681 utilities.assert_equals(
3682 expect=main.TRUE,
3683 actual=withdrawResult,
3684 onpass="Node was withdrawn from election",
3685 onfail="Node was not withdrawn from election" )
3686
3687 main.step( "Check that a new node was elected leader" )
3688 failMessage = "Nodes have different leaders"
3689 # Get new leaders and candidates
3690 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3691 newLeader = None
3692 if newLeaderResult:
3693 if newLeaders[ 0 ][ 0 ] == 'none':
3694 main.log.error( "No leader was elected on at least 1 node" )
3695 if not expectNoLeader:
3696 newLeaderResult = False
3697 newLeader = newLeaders[ 0 ][ 0 ]
3698
3699 # Check that the new leader is not the older leader, which was withdrawn
3700 if newLeader == oldLeader:
3701 newLeaderResult = False
3702 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3703 " as the current leader" )
3704 utilities.assert_equals(
3705 expect=True,
3706 actual=newLeaderResult,
3707 onpass="Leadership election passed",
3708 onfail="Something went wrong with Leadership election" )
3709
3710 main.step( "Check that that new leader was the candidate of old leader" )
3711 # candidates[ 2 ] should become the top candidate after withdrawl
3712 correctCandidateResult = main.TRUE
3713 if expectNoLeader:
3714 if newLeader == 'none':
3715 main.log.info( "No leader expected. None found. Pass" )
3716 correctCandidateResult = main.TRUE
3717 else:
3718 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3719 correctCandidateResult = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07003720 utilities.assert_equals(
3721 expect=main.TRUE,
3722 actual=correctCandidateResult,
3723 onpass="Correct Candidate Elected",
3724 onfail="Incorrect Candidate Elected" )
3725
3726 main.step( "Run for election on old leader( just so everyone " +
3727 "is in the hat )" )
3728 if oldLeaderCLI is not None:
3729 runResult = oldLeaderCLI.electionTestRun()
3730 else:
3731 main.log.error( "No old leader to re-elect" )
3732 runResult = main.FALSE
3733 utilities.assert_equals(
3734 expect=main.TRUE,
3735 actual=runResult,
3736 onpass="App re-ran for election",
3737 onfail="App failed to run for election" )
3738
3739 main.step(
3740 "Check that oldLeader is a candidate, and leader if only 1 node" )
3741 # verify leader didn't just change
3742 # Get new leaders and candidates
3743 reRunLeaders = []
3744 time.sleep( 5 ) # Paremterize
3745 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3746
Devin Lim58046fa2017-07-05 16:55:00 -07003747 def installDistributedPrimitiveApp( self, main ):
Jon Hall5d5876e2017-11-30 09:33:16 -08003748 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003749 Install Distributed Primitives app
Jon Hall5d5876e2017-11-30 09:33:16 -08003750 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003751 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003752 assert main, "main not defined"
3753 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003754
3755 # Variables for the distributed primitives tests
3756 main.pCounterName = "TestON-Partitions"
3757 main.pCounterValue = 0
3758 main.onosSet = set( [] )
3759 main.onosSetName = "TestON-set"
3760
3761 description = "Install Primitives app"
3762 main.case( description )
3763 main.step( "Install Primitives app" )
3764 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003765 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003766 utilities.assert_equals( expect=main.TRUE,
3767 actual=appResults,
3768 onpass="Primitives app activated",
3769 onfail="Primitives app not activated" )
3770 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003771 time.sleep( 5 ) # To allow all nodes to activate
Jon Halla478b852017-12-04 15:00:15 -08003772
3773 def upgradeInit( self, main ):
3774 '''
3775 Initiates an update
3776 '''
3777 main.step( "Send the command to initialize the upgrade" )
3778 ctrl = main.Cluster.next().CLI
3779 initialized = ctrl.issuInit()
3780 utilities.assert_equals( expect=main.TRUE, actual=initialized,
3781 onpass="ISSU initialized",
3782 onfail="Error initializing the upgrade" )
3783
3784 main.step( "Check the status of the upgrade" )
3785 ctrl = main.Cluster.next().CLI
3786 status = ctrl.issu()
3787 main.log.debug( status )
3788 # TODO: check things here?
3789
3790 main.step( "Checking ONOS nodes" )
3791 nodeResults = utilities.retry( main.Cluster.nodesCheck,
3792 False,
3793 sleep=15,
3794 attempts=5 )
3795 utilities.assert_equals( expect=True, actual=nodeResults,
3796 onpass="Nodes check successful",
3797 onfail="Nodes check NOT successful" )
Jon Hall7ce46ea2018-02-05 12:20:59 -08003798
3799 def backupData( self, main, location ):
3800 """
3801 Backs up ONOS data and logs to a given location on each active node in a cluster
3802 """
3803 result = True
3804 for ctrl in main.Cluster.active():
3805 try:
3806 ctrl.server.handle.sendline( "rm " + location )
3807 ctrl.server.handle.expect( ctrl.server.prompt )
3808 main.log.debug( ctrl.server.handle.before + ctrl.server.handle.after )
3809 except pexpect.ExceptionPexpect as e:
3810 main.log.error( e )
3811 main.cleanAndExit()
3812 ctrl.CLI.log( "'Starting backup of onos data'", level="INFO" )
3813 result = result and ( ctrl.server.backupData( location ) is main.TRUE )
3814 ctrl.CLI.log( "'End of backup of onos data'", level="INFO" )
3815 return result
3816
3817 def restoreData( self, main, location ):
3818 """
3819 Restores ONOS data and logs from a given location on each node in a cluster
3820 """
3821 result = True
3822 for ctrl in main.Cluster.controllers:
3823 result = result and ( ctrl.server.restoreData( location ) is main.TRUE )
3824 return result