blob: c75ccdf4f88116ca7ceabce96346a13cbec8b80d [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halla478b852017-12-04 15:00:15 -080023import pexpect
24import re
Jon Halle1a3b752015-07-22 13:02:46 -070025
Jon Hallf37d44d2017-05-24 10:37:30 -070026
Jon Hall41d39f12016-04-11 22:54:35 -070027class HA():
Jon Hall57b50432015-10-22 10:20:10 -070028
Jon Halla440e872016-03-31 15:15:50 -070029 def __init__( self ):
30 self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070031
    def customizeOnosGenPartitions( self ):
        """
        Copy the HA test's custom onos-gen-partitions script over the one in
        the ONOS tools directory so packaging uses the test's partition layout.
        """
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        # NOTE(review): cpResult is never checked; a failed copy is silent here.
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070043
    def cleanUpGenPartition( self ):
        """
        Restore the stock onos-gen-partitions script by running
        'git checkout' on the ONOS bench; exits the test on pexpect errors.
        """
        # clean up gen-partitions file
        try:
            # cd to the ONOS home and wait for the shell prompt before each command
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            # The shell session is in an unknown state; abort the test run
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070057
    def startingMininet( self ):
        """
        Copy the obelisk topology file to the Mininet node and start Mininet,
        asserting that startup succeeded.
        """
        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070072
Devin Lim58046fa2017-07-05 16:55:00 -070073 def scalingMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070074 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070075 main.scaling = main.params[ 'scaling' ].split( "," )
76 main.log.debug( main.scaling )
77 scale = main.scaling.pop( 0 )
78 main.log.debug( scale )
79 if "e" in scale:
80 equal = True
81 else:
82 equal = False
83 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070084 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
85 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070086 utilities.assert_equals( expect=main.TRUE, actual=genResult,
87 onpass="New cluster metadata file generated",
88 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070089
Devin Lim58046fa2017-07-05 16:55:00 -070090 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070091 main.step( "Generate initial metadata file" )
92 if main.Cluster.numCtrls >= 5:
93 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070094 else:
95 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070096 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070097 utilities.assert_equals( expect=main.TRUE, actual=genResult,
98 onpass="New cluster metadata file generated",
99 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700100
Devin Lim142b5342017-07-20 15:22:39 -0700101 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700102 import os
103 main.step( "Setup server for cluster metadata file" )
104 main.serverPort = main.params[ 'server' ][ 'port' ]
105 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
106 main.log.debug( "Root dir: {}".format( rootDir ) )
107 status = main.Server.start( main.ONOSbench,
108 rootDir,
109 port=main.serverPort,
110 logDir=main.logdir + "/server.log" )
111 utilities.assert_equals( expect=main.TRUE, actual=status,
112 onpass="Server started",
113 onfail="Failled to start SimpleHTTPServer" )
114
Jon Hall4f360bc2017-09-07 10:19:52 -0700115 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700116 main.step( "Copying backup config files" )
117 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
118 cp = main.ONOSbench.scp( main.ONOSbench,
119 main.onosServicepath,
120 main.onosServicepath + ".backup",
121 direction="to" )
122
123 utilities.assert_equals( expect=main.TRUE,
124 actual=cp,
125 onpass="Copy backup config file succeeded",
126 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700127
    def setMetadataUrl( self ):
        """
        Edit the onos-service file in place ( via sed ) so ONOS fetches its
        cluster metadata from this test's HTTP server instead of a local file.

        Requires main.serverPort ( set by setServerForCluster ) and
        main.onosServicepath ( set by copyBackupConfig ).
        """
        # NOTE: You should probably backup the config before and reset the config after the test
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Forward slashes are escaped because this value is substituted
        # inside a sed s/// expression below
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Inject a JAVA_OPTS export right after the "bash" line of onos-service
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
                                                                                        main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # The command echo contains metaFile; consume it, then wait for prompt
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
149
150 def cleanUpOnosService( self ):
151 # Cleanup custom onos-service file
152 main.ONOSbench.scp( main.ONOSbench,
153 main.onosServicepath + ".backup",
154 main.onosServicepath,
155 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700156
Jon Halla440e872016-03-31 15:15:50 -0700157 def consistentCheck( self ):
158 """
159 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700160
Jon Hallf37d44d2017-05-24 10:37:30 -0700161 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700162 - onosCounters is the parsed json output of the counters command on
163 all nodes
164 - consistent is main.TRUE if all "TestON" counters are consitent across
165 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700166 """
Jon Halle1a3b752015-07-22 13:02:46 -0700167 try:
Jon Halla440e872016-03-31 15:15:50 -0700168 # Get onos counters results
169 onosCountersRaw = []
170 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700171 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700172 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700173 name="counters-" + str( ctrl ),
174 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700175 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700176 'randomTime': True } )
177 threads.append( t )
178 t.start()
179 for t in threads:
180 t.join()
181 onosCountersRaw.append( t.result )
182 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700183 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700184 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700185 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700186 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700187 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700188 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700189 main.log.warn( repr( onosCountersRaw[ i ] ) )
190 onosCounters.append( [] )
191
192 testCounters = {}
193 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700194 # lookes like a dict whose keys are the name of the ONOS node and
195 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700196 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700197 # }
198 # NOTE: There is an assumtion that all nodes are active
199 # based on the above for loops
200 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700201 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700202 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700203 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700204 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700205 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700206 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700207 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700208 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700209 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700210 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
211 if all( tmp ):
212 consistent = main.TRUE
213 else:
214 consistent = main.FALSE
215 main.log.error( "ONOS nodes have different values for counters:\n" +
216 testCounters )
217 return ( onosCounters, consistent )
218 except Exception:
219 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700220 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700221
222 def counterCheck( self, counterName, counterValue ):
223 """
224 Checks that TestON counters are consistent across all nodes and that
225 specified counter is in ONOS with the given value
226 """
227 try:
228 correctResults = main.TRUE
229 # Get onos counters results and consistentCheck
230 onosCounters, consistent = self.consistentCheck()
231 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700232 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700233 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700234 onosValue = None
235 try:
236 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700237 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700238 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700239 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700240 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700241 correctResults = main.FALSE
242 if onosValue == counterValue:
243 main.log.info( counterName + " counter value is correct" )
244 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700245 main.log.error( counterName +
246 " counter value is incorrect," +
247 " expected value: " + str( counterValue ) +
248 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700249 correctResults = main.FALSE
250 return consistent and correctResults
251 except Exception:
252 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700253 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700254
255 def consistentLeaderboards( self, nodes ):
256 TOPIC = 'org.onosproject.election'
257 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700258 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700259 for n in range( 5 ): # Retry in case election is still happening
260 leaderList = []
261 # Get all leaderboards
262 for cli in nodes:
263 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
264 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700265 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700266 leaderList is not None
267 main.log.debug( leaderList )
268 main.log.warn( result )
269 if result:
270 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700271 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700272 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
273 return ( result, leaderList )
274
Devin Lim58046fa2017-07-05 16:55:00 -0700275 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
Jeremy Ronquillo7f8fb572017-11-14 08:28:41 -0800276 # DEPRECATED: ONOSSetup.py now creates these graphs.
277
278 main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
Jon Hallca319892017-06-15 15:25:22 -0700279
    def initialSetUp( self, serviceClean=False ):
        """
        Remainder of the common test setup after ONOS is installed: optional
        packet capture, optional onos-service cleanup, node health check, app
        activation, ONOS configuration, and an app ID check.

        Arguments:
        - serviceClean: when True, revert onos.conf / onos.service changes
          via git checkout on the bench.
        """
        # Optionally start a tcpdump capture on the Mininet side
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        # Retry the cluster-wide node health check until it passes
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       attempts=9 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump inactive components from every node, then abort the test
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            # Verify each requested app reached the ACTIVE state
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This shoudl be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            # Apply every component/setting pair from the params file
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700365
Jon Hallca319892017-06-15 15:25:22 -0700366 def commonChecks( self ):
367 # TODO: make this assertable or assert in here?
368 self.topicsCheck()
369 self.partitionsCheck()
370 self.pendingMapCheck()
371 self.appCheck()
372
373 def topicsCheck( self, extraTopics=[] ):
374 """
375 Check for work partition topics in leaders output
376 """
377 leaders = main.Cluster.next().leaders()
378 missing = False
379 try:
380 if leaders:
381 parsedLeaders = json.loads( leaders )
382 output = json.dumps( parsedLeaders,
383 sort_keys=True,
384 indent=4,
385 separators=( ',', ': ' ) )
386 main.log.debug( "Leaders: " + output )
387 # check for all intent partitions
388 topics = []
389 for i in range( 14 ):
390 topics.append( "work-partition-" + str( i ) )
391 topics += extraTopics
392 main.log.debug( topics )
393 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
394 for topic in topics:
395 if topic not in ONOStopics:
396 main.log.error( "Error: " + topic +
397 " not in leaders" )
398 missing = True
399 else:
400 main.log.error( "leaders() returned None" )
401 except ( ValueError, TypeError ):
402 main.log.exception( "Error parsing leaders" )
403 main.log.error( repr( leaders ) )
404 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700405 # NOTE Can we refactor this into the Cluster class?
406 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700407 for ctrl in main.Cluster.active():
408 response = ctrl.CLI.leaders( jsonFormat=False )
409 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
410 str( response ) )
411 return missing
412
413 def partitionsCheck( self ):
414 # TODO: return something assertable
415 partitions = main.Cluster.next().partitions()
416 try:
417 if partitions:
418 parsedPartitions = json.loads( partitions )
419 output = json.dumps( parsedPartitions,
420 sort_keys=True,
421 indent=4,
422 separators=( ',', ': ' ) )
423 main.log.debug( "Partitions: " + output )
424 # TODO check for a leader in all paritions
425 # TODO check for consistency among nodes
426 else:
427 main.log.error( "partitions() returned None" )
428 except ( ValueError, TypeError ):
429 main.log.exception( "Error parsing partitions" )
430 main.log.error( repr( partitions ) )
431
432 def pendingMapCheck( self ):
433 pendingMap = main.Cluster.next().pendingMap()
434 try:
435 if pendingMap:
436 parsedPending = json.loads( pendingMap )
437 output = json.dumps( parsedPending,
438 sort_keys=True,
439 indent=4,
440 separators=( ',', ': ' ) )
441 main.log.debug( "Pending map: " + output )
442 # TODO check something here?
443 else:
444 main.log.error( "pendingMap() returned None" )
445 except ( ValueError, TypeError ):
446 main.log.exception( "Error parsing pending map" )
447 main.log.error( repr( pendingMap ) )
448
449 def appCheck( self ):
450 """
451 Check App IDs on all nodes
452 """
453 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
454 appResults = main.Cluster.command( "appToIDCheck" )
455 appCheck = all( i == main.TRUE for i in appResults )
456 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700457 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700458 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
459 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
460 return appCheck
461
Jon Halle0f0b342017-04-18 11:43:47 -0700462 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
463 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700464 completedValues = main.Cluster.command( "workQueueTotalCompleted",
465 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700466 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700467 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700468 completedResult = all( completedResults )
469 if not completedResult:
470 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
471 workQueueName, completed, completedValues ) )
472
473 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700474 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
475 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700476 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700477 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700478 inProgressResult = all( inProgressResults )
479 if not inProgressResult:
480 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
481 workQueueName, inProgress, inProgressValues ) )
482
483 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700484 pendingValues = main.Cluster.command( "workQueueTotalPending",
485 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700486 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700487 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700488 pendingResult = all( pendingResults )
489 if not pendingResult:
490 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
491 workQueueName, pending, pendingValues ) )
492 return completedResult and inProgressResult and pendingResult
493
    def assignDevices( self, main ):
        """
        Assign devices to controllers.

        Points every Mininet switch ( s1..s28 ) at all cluster controller IPs
        via ovs-vsctl, then verifies each switch lists every running
        controller.
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = []
        # NOTE: switch names s1-s28 are fixed by the obelisk topology
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            # Each switch must list every running controller as tcp:<ip>
            for ctrl in main.Cluster.runningNodes:
                if re.search( "tcp:" + ctrl.ipAddress, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700534
Devin Lim58046fa2017-07-05 16:55:00 -0700535 def assignIntents( self, main ):
536 """
537 Assign intents
538 """
539 import time
540 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700541 assert main, "main not defined"
542 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700543 try:
544 main.HAlabels
545 except ( NameError, AttributeError ):
546 main.log.error( "main.HAlabels not defined, setting to []" )
547 main.HAlabels = []
548 try:
549 main.HAdata
550 except ( NameError, AttributeError ):
551 main.log.error( "data not defined, setting to []" )
552 main.HAdata = []
553 main.case( "Adding host Intents" )
554 main.caseExplanation = "Discover hosts by using pingall then " +\
555 "assign predetermined host-to-host intents." +\
556 " After installation, check that the intent" +\
557 " is distributed to all nodes and the state" +\
558 " is INSTALLED"
559
560 # install onos-app-fwd
561 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700562 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700563 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700564 utilities.assert_equals( expect=main.TRUE, actual=installResults,
565 onpass="Install fwd successful",
566 onfail="Install fwd failed" )
567
568 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700569 appCheck = self.appCheck()
570 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700571 onpass="App Ids seem to be correct",
572 onfail="Something is wrong with app Ids" )
573
574 main.step( "Discovering Hosts( Via pingall for now )" )
575 # FIXME: Once we have a host discovery mechanism, use that instead
576 # REACTIVE FWD test
577 pingResult = main.FALSE
578 passMsg = "Reactive Pingall test passed"
579 time1 = time.time()
580 pingResult = main.Mininet1.pingall()
581 time2 = time.time()
582 if not pingResult:
583 main.log.warn( "First pingall failed. Trying again..." )
584 pingResult = main.Mininet1.pingall()
585 passMsg += " on the second try"
586 utilities.assert_equals(
587 expect=main.TRUE,
588 actual=pingResult,
589 onpass=passMsg,
590 onfail="Reactive Pingall failed, " +
591 "one or more ping pairs failed" )
592 main.log.info( "Time for pingall: %2f seconds" %
593 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700594 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700595 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700596 # timeout for fwd flows
597 time.sleep( 11 )
598 # uninstall onos-app-fwd
599 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700600 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700601 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
602 onpass="Uninstall fwd successful",
603 onfail="Uninstall fwd failed" )
604
605 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700606 appCheck2 = self.appCheck()
607 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700608 onpass="App Ids seem to be correct",
609 onfail="Something is wrong with app Ids" )
610
611 main.step( "Add host intents via cli" )
612 intentIds = []
613 # TODO: move the host numbers to params
614 # Maybe look at all the paths we ping?
615 intentAddResult = True
616 hostResult = main.TRUE
617 for i in range( 8, 18 ):
618 main.log.info( "Adding host intent between h" + str( i ) +
619 " and h" + str( i + 10 ) )
620 host1 = "00:00:00:00:00:" + \
621 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
622 host2 = "00:00:00:00:00:" + \
623 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
624 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700625 host1Dict = onosCli.CLI.getHost( host1 )
626 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700627 host1Id = None
628 host2Id = None
629 if host1Dict and host2Dict:
630 host1Id = host1Dict.get( 'id', None )
631 host2Id = host2Dict.get( 'id', None )
632 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700633 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700634 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700635 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700636 if tmpId:
637 main.log.info( "Added intent with id: " + tmpId )
638 intentIds.append( tmpId )
639 else:
640 main.log.error( "addHostIntent returned: " +
641 repr( tmpId ) )
642 else:
643 main.log.error( "Error, getHost() failed for h" + str( i ) +
644 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700645 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700646 try:
Jon Hallca319892017-06-15 15:25:22 -0700647 output = json.dumps( json.loads( hosts ),
648 sort_keys=True,
649 indent=4,
650 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700651 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700652 output = repr( hosts )
653 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700654 hostResult = main.FALSE
655 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
656 onpass="Found a host id for each host",
657 onfail="Error looking up host ids" )
658
659 intentStart = time.time()
660 onosIds = onosCli.getAllIntentsId()
661 main.log.info( "Submitted intents: " + str( intentIds ) )
662 main.log.info( "Intents in ONOS: " + str( onosIds ) )
663 for intent in intentIds:
664 if intent in onosIds:
665 pass # intent submitted is in onos
666 else:
667 intentAddResult = False
668 if intentAddResult:
669 intentStop = time.time()
670 else:
671 intentStop = None
672 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700673 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700674 intentStates = []
675 installedCheck = True
676 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
677 count = 0
678 try:
679 for intent in json.loads( intents ):
680 state = intent.get( 'state', None )
681 if "INSTALLED" not in state:
682 installedCheck = False
683 intentId = intent.get( 'id', None )
684 intentStates.append( ( intentId, state ) )
685 except ( ValueError, TypeError ):
686 main.log.exception( "Error parsing intents" )
687 # add submitted intents not in the store
688 tmplist = [ i for i, s in intentStates ]
689 missingIntents = False
690 for i in intentIds:
691 if i not in tmplist:
692 intentStates.append( ( i, " - " ) )
693 missingIntents = True
694 intentStates.sort()
695 for i, s in intentStates:
696 count += 1
697 main.log.info( "%-6s%-15s%-15s" %
698 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700699 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700700
701 intentAddResult = bool( intentAddResult and not missingIntents and
702 installedCheck )
703 if not intentAddResult:
704 main.log.error( "Error in pushing host intents to ONOS" )
705
706 main.step( "Intent Anti-Entropy dispersion" )
707 for j in range( 100 ):
708 correct = True
709 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700710 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700711 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700712 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700713 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700714 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700715 str( sorted( onosIds ) ) )
716 if sorted( ids ) != sorted( intentIds ):
717 main.log.warn( "Set of intent IDs doesn't match" )
718 correct = False
719 break
720 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700721 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700722 for intent in intents:
723 if intent[ 'state' ] != "INSTALLED":
724 main.log.warn( "Intent " + intent[ 'id' ] +
725 " is " + intent[ 'state' ] )
726 correct = False
727 break
728 if correct:
729 break
730 else:
731 time.sleep( 1 )
732 if not intentStop:
733 intentStop = time.time()
734 global gossipTime
735 gossipTime = intentStop - intentStart
736 main.log.info( "It took about " + str( gossipTime ) +
737 " seconds for all intents to appear in each node" )
738 append = False
739 title = "Gossip Intents"
740 count = 1
741 while append is False:
742 curTitle = title + str( count )
743 if curTitle not in main.HAlabels:
744 main.HAlabels.append( curTitle )
745 main.HAdata.append( str( gossipTime ) )
746 append = True
747 else:
748 count += 1
749 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700750 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700751 utilities.assert_greater_equals(
752 expect=maxGossipTime, actual=gossipTime,
753 onpass="ECM anti-entropy for intents worked within " +
754 "expected time",
755 onfail="Intent ECM anti-entropy took too long. " +
756 "Expected time:{}, Actual time:{}".format( maxGossipTime,
757 gossipTime ) )
758 if gossipTime <= maxGossipTime:
759 intentAddResult = True
760
Jon Hallca319892017-06-15 15:25:22 -0700761 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700762 if not intentAddResult or "key" in pendingMap:
Devin Lim58046fa2017-07-05 16:55:00 -0700763 installedCheck = True
764 main.log.info( "Sleeping 60 seconds to see if intents are found" )
765 time.sleep( 60 )
766 onosIds = onosCli.getAllIntentsId()
767 main.log.info( "Submitted intents: " + str( intentIds ) )
768 main.log.info( "Intents in ONOS: " + str( onosIds ) )
769 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700770 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700771 intentStates = []
772 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
773 count = 0
774 try:
775 for intent in json.loads( intents ):
776 # Iter through intents of a node
777 state = intent.get( 'state', None )
778 if "INSTALLED" not in state:
779 installedCheck = False
780 intentId = intent.get( 'id', None )
781 intentStates.append( ( intentId, state ) )
782 except ( ValueError, TypeError ):
783 main.log.exception( "Error parsing intents" )
784 # add submitted intents not in the store
785 tmplist = [ i for i, s in intentStates ]
786 for i in intentIds:
787 if i not in tmplist:
788 intentStates.append( ( i, " - " ) )
789 intentStates.sort()
790 for i, s in intentStates:
791 count += 1
792 main.log.info( "%-6s%-15s%-15s" %
793 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700794 self.topicsCheck( [ "org.onosproject.election" ] )
795 self.partitionsCheck()
796 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700797
Jon Hallca319892017-06-15 15:25:22 -0700798 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700799 """
800 Ping across added host intents
801 """
802 import json
803 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700804 assert main, "main not defined"
805 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700806 main.case( "Verify connectivity by sending traffic across Intents" )
807 main.caseExplanation = "Ping across added host intents to check " +\
808 "functionality and check the state of " +\
809 "the intent"
810
Jon Hallca319892017-06-15 15:25:22 -0700811 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700812 main.step( "Check Intent state" )
813 installedCheck = False
814 loopCount = 0
Jon Hall5d5876e2017-11-30 09:33:16 -0800815 while not installedCheck and loopCount < 90:
Devin Lim58046fa2017-07-05 16:55:00 -0700816 installedCheck = True
817 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700818 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700819 intentStates = []
820 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
821 count = 0
822 # Iter through intents of a node
823 try:
824 for intent in json.loads( intents ):
825 state = intent.get( 'state', None )
826 if "INSTALLED" not in state:
827 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700828 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700829 intentId = intent.get( 'id', None )
830 intentStates.append( ( intentId, state ) )
831 except ( ValueError, TypeError ):
832 main.log.exception( "Error parsing intents." )
833 # Print states
834 intentStates.sort()
835 for i, s in intentStates:
836 count += 1
837 main.log.info( "%-6s%-15s%-15s" %
838 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700839 if not installedCheck:
840 time.sleep( 1 )
841 loopCount += 1
842 utilities.assert_equals( expect=True, actual=installedCheck,
843 onpass="Intents are all INSTALLED",
844 onfail="Intents are not all in " +
845 "INSTALLED state" )
846
847 main.step( "Ping across added host intents" )
848 PingResult = main.TRUE
849 for i in range( 8, 18 ):
850 ping = main.Mininet1.pingHost( src="h" + str( i ),
851 target="h" + str( i + 10 ) )
852 PingResult = PingResult and ping
853 if ping == main.FALSE:
854 main.log.warn( "Ping failed between h" + str( i ) +
855 " and h" + str( i + 10 ) )
856 elif ping == main.TRUE:
857 main.log.info( "Ping test passed!" )
858 # Don't set PingResult or you'd override failures
859 if PingResult == main.FALSE:
860 main.log.error(
861 "Intents have not been installed correctly, pings failed." )
862 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700863 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700864 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700865 output = json.dumps( json.loads( tmpIntents ),
866 sort_keys=True,
867 indent=4,
868 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700869 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700870 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700871 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700872 utilities.assert_equals(
873 expect=main.TRUE,
874 actual=PingResult,
875 onpass="Intents have been installed correctly and pings work",
876 onfail="Intents have not been installed correctly, pings failed." )
877
878 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700879 topicsCheck = self.topicsCheck()
880 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700881 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700882 onfail="Some topics were lost" )
883 self.partitionsCheck()
884 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700885
886 if not installedCheck:
887 main.log.info( "Waiting 60 seconds to see if the state of " +
888 "intents change" )
889 time.sleep( 60 )
890 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700891 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700892 intentStates = []
893 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
894 count = 0
895 # Iter through intents of a node
896 try:
897 for intent in json.loads( intents ):
898 state = intent.get( 'state', None )
899 if "INSTALLED" not in state:
900 installedCheck = False
901 intentId = intent.get( 'id', None )
902 intentStates.append( ( intentId, state ) )
903 except ( ValueError, TypeError ):
904 main.log.exception( "Error parsing intents." )
905 intentStates.sort()
906 for i, s in intentStates:
907 count += 1
908 main.log.info( "%-6s%-15s%-15s" %
909 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700910 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700911
Devin Lim58046fa2017-07-05 16:55:00 -0700912 # Print flowrules
Devin Lime9f0ccf2017-08-11 17:25:12 -0700913 main.log.debug( onosCli.CLI.flows() )
Devin Lim58046fa2017-07-05 16:55:00 -0700914 main.step( "Wait a minute then ping again" )
915 # the wait is above
916 PingResult = main.TRUE
917 for i in range( 8, 18 ):
918 ping = main.Mininet1.pingHost( src="h" + str( i ),
919 target="h" + str( i + 10 ) )
920 PingResult = PingResult and ping
921 if ping == main.FALSE:
922 main.log.warn( "Ping failed between h" + str( i ) +
923 " and h" + str( i + 10 ) )
924 elif ping == main.TRUE:
925 main.log.info( "Ping test passed!" )
926 # Don't set PingResult or you'd override failures
927 if PingResult == main.FALSE:
928 main.log.error(
929 "Intents have not been installed correctly, pings failed." )
930 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700931 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700932 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700933 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700934 main.log.warn( json.dumps( json.loads( tmpIntents ),
935 sort_keys=True,
936 indent=4,
937 separators=( ',', ': ' ) ) )
938 except ( ValueError, TypeError ):
939 main.log.warn( repr( tmpIntents ) )
940 utilities.assert_equals(
941 expect=main.TRUE,
942 actual=PingResult,
943 onpass="Intents have been installed correctly and pings work",
944 onfail="Intents have not been installed correctly, pings failed." )
945
Devin Lim142b5342017-07-20 15:22:39 -0700946 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700947 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700948 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700949 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700950 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700951 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700952 actual=rolesNotNull,
953 onpass="Each device has a master",
954 onfail="Some devices don't have a master assigned" )
955
Devin Lim142b5342017-07-20 15:22:39 -0700956 def checkTheRole( self ):
957 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700958 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700959 consistentMastership = True
960 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700961 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700962 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700963 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700964 main.log.error( "Error in getting " + node + " roles" )
965 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -0700966 repr( ONOSMastership[ i ] ) )
967 rolesResults = False
968 utilities.assert_equals(
969 expect=True,
970 actual=rolesResults,
971 onpass="No error in reading roles output",
972 onfail="Error in reading roles from ONOS" )
973
974 main.step( "Check for consistency in roles from each controller" )
975 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
976 main.log.info(
977 "Switch roles are consistent across all ONOS nodes" )
978 else:
979 consistentMastership = False
980 utilities.assert_equals(
981 expect=True,
982 actual=consistentMastership,
983 onpass="Switch roles are consistent across all ONOS nodes",
984 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -0700985 return ONOSMastership, rolesResults, consistentMastership
986
987 def checkingIntents( self ):
988 main.step( "Get the intents from each controller" )
989 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
990 intentsResults = True
991 for i in range( len( ONOSIntents ) ):
992 node = str( main.Cluster.active( i ) )
993 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
994 main.log.error( "Error in getting " + node + " intents" )
995 main.log.warn( node + " intents response: " +
996 repr( ONOSIntents[ i ] ) )
997 intentsResults = False
998 utilities.assert_equals(
999 expect=True,
1000 actual=intentsResults,
1001 onpass="No error in reading intents output",
1002 onfail="Error in reading intents from ONOS" )
1003 return ONOSIntents, intentsResults
1004
1005 def readingState( self, main ):
1006 """
1007 Reading state of ONOS
1008 """
1009 import json
Devin Lim142b5342017-07-20 15:22:39 -07001010 assert main, "main not defined"
1011 assert utilities.assert_equals, "utilities.assert_equals not defined"
1012 try:
1013 from tests.dependencies.topology import Topology
1014 except ImportError:
1015 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001016 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001017 try:
1018 main.topoRelated
1019 except ( NameError, AttributeError ):
1020 main.topoRelated = Topology()
1021 main.case( "Setting up and gathering data for current state" )
1022 # The general idea for this test case is to pull the state of
1023 # ( intents,flows, topology,... ) from each ONOS node
1024 # We can then compare them with each other and also with past states
1025
1026 global mastershipState
1027 mastershipState = '[]'
1028
1029 self.checkRoleNotNull()
1030
1031 main.step( "Get the Mastership of each switch from each controller" )
1032 mastershipCheck = main.FALSE
1033
1034 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001035
1036 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001037 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001038 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001039 try:
1040 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001041 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001042 json.dumps(
1043 json.loads( ONOSMastership[ i ] ),
1044 sort_keys=True,
1045 indent=4,
1046 separators=( ',', ': ' ) ) )
1047 except ( ValueError, TypeError ):
1048 main.log.warn( repr( ONOSMastership[ i ] ) )
1049 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001050 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001051 mastershipState = ONOSMastership[ 0 ]
1052
Devin Lim58046fa2017-07-05 16:55:00 -07001053 global intentState
1054 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001055 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001056 intentCheck = main.FALSE
1057 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001058
Devin Lim58046fa2017-07-05 16:55:00 -07001059 main.step( "Check for consistency in Intents from each controller" )
1060 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1061 main.log.info( "Intents are consistent across all ONOS " +
1062 "nodes" )
1063 else:
1064 consistentIntents = False
1065 main.log.error( "Intents not consistent" )
1066 utilities.assert_equals(
1067 expect=True,
1068 actual=consistentIntents,
1069 onpass="Intents are consistent across all ONOS nodes",
1070 onfail="ONOS nodes have different views of intents" )
1071
1072 if intentsResults:
1073 # Try to make it easy to figure out what is happening
1074 #
1075 # Intent ONOS1 ONOS2 ...
1076 # 0x01 INSTALLED INSTALLING
1077 # ... ... ...
1078 # ... ... ...
1079 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001080 for ctrl in main.Cluster.active():
1081 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001082 main.log.warn( title )
1083 # get all intent keys in the cluster
1084 keys = []
1085 try:
1086 # Get the set of all intent keys
1087 for nodeStr in ONOSIntents:
1088 node = json.loads( nodeStr )
1089 for intent in node:
1090 keys.append( intent.get( 'id' ) )
1091 keys = set( keys )
1092 # For each intent key, print the state on each node
1093 for key in keys:
1094 row = "%-13s" % key
1095 for nodeStr in ONOSIntents:
1096 node = json.loads( nodeStr )
1097 for intent in node:
1098 if intent.get( 'id', "Error" ) == key:
1099 row += "%-15s" % intent.get( 'state' )
1100 main.log.warn( row )
1101 # End of intent state table
1102 except ValueError as e:
1103 main.log.exception( e )
1104 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1105
1106 if intentsResults and not consistentIntents:
1107 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001108 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001109 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1110 sort_keys=True,
1111 indent=4,
1112 separators=( ',', ': ' ) ) )
1113 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001114 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001115 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001116 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001117 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1118 sort_keys=True,
1119 indent=4,
1120 separators=( ',', ': ' ) ) )
1121 else:
Jon Hallca319892017-06-15 15:25:22 -07001122 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001123 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001124 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001125 intentState = ONOSIntents[ 0 ]
1126
1127 main.step( "Get the flows from each controller" )
1128 global flowState
1129 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001130 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001131 ONOSFlowsJson = []
1132 flowCheck = main.FALSE
1133 consistentFlows = True
1134 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001135 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001136 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001137 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001138 main.log.error( "Error in getting " + node + " flows" )
1139 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001140 repr( ONOSFlows[ i ] ) )
1141 flowsResults = False
1142 ONOSFlowsJson.append( None )
1143 else:
1144 try:
1145 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1146 except ( ValueError, TypeError ):
1147 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001148 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001149 " response as json." )
1150 main.log.error( repr( ONOSFlows[ i ] ) )
1151 ONOSFlowsJson.append( None )
1152 flowsResults = False
1153 utilities.assert_equals(
1154 expect=True,
1155 actual=flowsResults,
1156 onpass="No error in reading flows output",
1157 onfail="Error in reading flows from ONOS" )
1158
1159 main.step( "Check for consistency in Flows from each controller" )
1160 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1161 if all( tmp ):
1162 main.log.info( "Flow count is consistent across all ONOS nodes" )
1163 else:
1164 consistentFlows = False
1165 utilities.assert_equals(
1166 expect=True,
1167 actual=consistentFlows,
1168 onpass="The flow count is consistent across all ONOS nodes",
1169 onfail="ONOS nodes have different flow counts" )
1170
1171 if flowsResults and not consistentFlows:
1172 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001173 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001174 try:
1175 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001176 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001177 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1178 indent=4, separators=( ',', ': ' ) ) )
1179 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001180 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001181 repr( ONOSFlows[ i ] ) )
1182 elif flowsResults and consistentFlows:
1183 flowCheck = main.TRUE
1184 flowState = ONOSFlows[ 0 ]
1185
1186 main.step( "Get the OF Table entries" )
1187 global flows
1188 flows = []
1189 for i in range( 1, 29 ):
1190 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1191 if flowCheck == main.FALSE:
1192 for table in flows:
1193 main.log.warn( table )
1194 # TODO: Compare switch flow tables with ONOS flow tables
1195
1196 main.step( "Start continuous pings" )
1197 main.Mininet2.pingLong(
1198 src=main.params[ 'PING' ][ 'source1' ],
1199 target=main.params[ 'PING' ][ 'target1' ],
1200 pingTime=500 )
1201 main.Mininet2.pingLong(
1202 src=main.params[ 'PING' ][ 'source2' ],
1203 target=main.params[ 'PING' ][ 'target2' ],
1204 pingTime=500 )
1205 main.Mininet2.pingLong(
1206 src=main.params[ 'PING' ][ 'source3' ],
1207 target=main.params[ 'PING' ][ 'target3' ],
1208 pingTime=500 )
1209 main.Mininet2.pingLong(
1210 src=main.params[ 'PING' ][ 'source4' ],
1211 target=main.params[ 'PING' ][ 'target4' ],
1212 pingTime=500 )
1213 main.Mininet2.pingLong(
1214 src=main.params[ 'PING' ][ 'source5' ],
1215 target=main.params[ 'PING' ][ 'target5' ],
1216 pingTime=500 )
1217 main.Mininet2.pingLong(
1218 src=main.params[ 'PING' ][ 'source6' ],
1219 target=main.params[ 'PING' ][ 'target6' ],
1220 pingTime=500 )
1221 main.Mininet2.pingLong(
1222 src=main.params[ 'PING' ][ 'source7' ],
1223 target=main.params[ 'PING' ][ 'target7' ],
1224 pingTime=500 )
1225 main.Mininet2.pingLong(
1226 src=main.params[ 'PING' ][ 'source8' ],
1227 target=main.params[ 'PING' ][ 'target8' ],
1228 pingTime=500 )
1229 main.Mininet2.pingLong(
1230 src=main.params[ 'PING' ][ 'source9' ],
1231 target=main.params[ 'PING' ][ 'target9' ],
1232 pingTime=500 )
1233 main.Mininet2.pingLong(
1234 src=main.params[ 'PING' ][ 'source10' ],
1235 target=main.params[ 'PING' ][ 'target10' ],
1236 pingTime=500 )
1237
1238 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001239 devices = main.topoRelated.getAll( "devices" )
1240 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1241 ports = main.topoRelated.getAll( "ports" )
1242 links = main.topoRelated.getAll( "links" )
1243 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001244 # Compare json objects for hosts and dataplane clusters
1245
1246 # hosts
1247 main.step( "Host view is consistent across ONOS nodes" )
1248 consistentHostsResult = main.TRUE
1249 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001250 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001251 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1252 if hosts[ controller ] == hosts[ 0 ]:
1253 continue
1254 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001255 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001256 controllerStr +
1257 " is inconsistent with ONOS1" )
1258 main.log.warn( repr( hosts[ controller ] ) )
1259 consistentHostsResult = main.FALSE
1260
1261 else:
Jon Hallca319892017-06-15 15:25:22 -07001262 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001263 controllerStr )
1264 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001265 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001266 " hosts response: " +
1267 repr( hosts[ controller ] ) )
1268 utilities.assert_equals(
1269 expect=main.TRUE,
1270 actual=consistentHostsResult,
1271 onpass="Hosts view is consistent across all ONOS nodes",
1272 onfail="ONOS nodes have different views of hosts" )
1273
1274 main.step( "Each host has an IP address" )
1275 ipResult = main.TRUE
1276 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001277 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001278 if hosts[ controller ]:
1279 for host in hosts[ controller ]:
1280 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001281 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001282 controllerStr + ": " + str( host ) )
1283 ipResult = main.FALSE
1284 utilities.assert_equals(
1285 expect=main.TRUE,
1286 actual=ipResult,
1287 onpass="The ips of the hosts aren't empty",
1288 onfail="The ip of at least one host is missing" )
1289
1290 # Strongly connected clusters of devices
1291 main.step( "Cluster view is consistent across ONOS nodes" )
1292 consistentClustersResult = main.TRUE
1293 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001294 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001295 if "Error" not in clusters[ controller ]:
1296 if clusters[ controller ] == clusters[ 0 ]:
1297 continue
1298 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001299 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001300 " is inconsistent with ONOS1" )
1301 consistentClustersResult = main.FALSE
1302
1303 else:
1304 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001305 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001306 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001307 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001308 " clusters response: " +
1309 repr( clusters[ controller ] ) )
1310 utilities.assert_equals(
1311 expect=main.TRUE,
1312 actual=consistentClustersResult,
1313 onpass="Clusters view is consistent across all ONOS nodes",
1314 onfail="ONOS nodes have different views of clusters" )
1315 if not consistentClustersResult:
1316 main.log.debug( clusters )
1317
1318 # there should always only be one cluster
1319 main.step( "Cluster view correct across ONOS nodes" )
1320 try:
1321 numClusters = len( json.loads( clusters[ 0 ] ) )
1322 except ( ValueError, TypeError ):
1323 main.log.exception( "Error parsing clusters[0]: " +
1324 repr( clusters[ 0 ] ) )
1325 numClusters = "ERROR"
1326 utilities.assert_equals(
1327 expect=1,
1328 actual=numClusters,
1329 onpass="ONOS shows 1 SCC",
1330 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1331
1332 main.step( "Comparing ONOS topology to MN" )
1333 devicesResults = main.TRUE
1334 linksResults = main.TRUE
1335 hostsResults = main.TRUE
1336 mnSwitches = main.Mininet1.getSwitches()
1337 mnLinks = main.Mininet1.getLinks()
1338 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001339 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001340 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001341 currentDevicesResult = main.topoRelated.compareDevicePort(
1342 main.Mininet1, controller,
1343 mnSwitches, devices, ports )
1344 utilities.assert_equals( expect=main.TRUE,
1345 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001346 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001347 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001348 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001349 " Switches view is incorrect" )
1350
1351 currentLinksResult = main.topoRelated.compareBase( links, controller,
1352 main.Mininet1.compareLinks,
1353 [ mnSwitches, mnLinks ] )
1354 utilities.assert_equals( expect=main.TRUE,
1355 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001356 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001357 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001358 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001359 " links view is incorrect" )
1360
1361 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1362 currentHostsResult = main.Mininet1.compareHosts(
1363 mnHosts,
1364 hosts[ controller ] )
1365 else:
1366 currentHostsResult = main.FALSE
1367 utilities.assert_equals( expect=main.TRUE,
1368 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001369 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001370 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001371 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001372 " hosts don't match Mininet" )
1373
1374 devicesResults = devicesResults and currentDevicesResult
1375 linksResults = linksResults and currentLinksResult
1376 hostsResults = hostsResults and currentHostsResult
1377
1378 main.step( "Device information is correct" )
1379 utilities.assert_equals(
1380 expect=main.TRUE,
1381 actual=devicesResults,
1382 onpass="Device information is correct",
1383 onfail="Device information is incorrect" )
1384
1385 main.step( "Links are correct" )
1386 utilities.assert_equals(
1387 expect=main.TRUE,
1388 actual=linksResults,
1389 onpass="Link are correct",
1390 onfail="Links are incorrect" )
1391
1392 main.step( "Hosts are correct" )
1393 utilities.assert_equals(
1394 expect=main.TRUE,
1395 actual=hostsResults,
1396 onpass="Hosts are correct",
1397 onfail="Hosts are incorrect" )
1398
1399 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001400 """
1401 Check for basic functionality with distributed primitives
1402 """
Jon Halle0f0b342017-04-18 11:43:47 -07001403 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001404 try:
1405 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001406 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001407 assert main.pCounterName, "main.pCounterName not defined"
1408 assert main.onosSetName, "main.onosSetName not defined"
1409 # NOTE: assert fails if value is 0/None/Empty/False
1410 try:
1411 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001412 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001413 main.log.error( "main.pCounterValue not defined, setting to 0" )
1414 main.pCounterValue = 0
1415 try:
1416 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001417 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001418 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001419 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001420 # Variables for the distributed primitives tests. These are local only
1421 addValue = "a"
1422 addAllValue = "a b c d e f"
1423 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001424 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001425 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001426 workQueueName = "TestON-Queue"
1427 workQueueCompleted = 0
1428 workQueueInProgress = 0
1429 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001430
1431 description = "Check for basic functionality with distributed " +\
1432 "primitives"
1433 main.case( description )
1434 main.caseExplanation = "Test the methods of the distributed " +\
1435 "primitives (counters and sets) throught the cli"
1436 # DISTRIBUTED ATOMIC COUNTERS
1437 # Partitioned counters
1438 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001439 pCounters = main.Cluster.command( "counterTestAddAndGet",
1440 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001441 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001442 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001443 main.pCounterValue += 1
1444 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001445 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001446 pCounterResults = True
1447 for i in addedPValues:
1448 tmpResult = i in pCounters
1449 pCounterResults = pCounterResults and tmpResult
1450 if not tmpResult:
1451 main.log.error( str( i ) + " is not in partitioned "
1452 "counter incremented results" )
1453 utilities.assert_equals( expect=True,
1454 actual=pCounterResults,
1455 onpass="Default counter incremented",
1456 onfail="Error incrementing default" +
1457 " counter" )
1458
1459 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001460 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1461 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001462 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001463 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001464 addedPValues.append( main.pCounterValue )
1465 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001466 # Check that counter incremented numController times
1467 pCounterResults = True
1468 for i in addedPValues:
1469 tmpResult = i in pCounters
1470 pCounterResults = pCounterResults and tmpResult
1471 if not tmpResult:
1472 main.log.error( str( i ) + " is not in partitioned "
1473 "counter incremented results" )
1474 utilities.assert_equals( expect=True,
1475 actual=pCounterResults,
1476 onpass="Default counter incremented",
1477 onfail="Error incrementing default" +
1478 " counter" )
1479
1480 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001481 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001482 utilities.assert_equals( expect=main.TRUE,
1483 actual=incrementCheck,
1484 onpass="Added counters are correct",
1485 onfail="Added counters are incorrect" )
1486
1487 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001488 pCounters = main.Cluster.command( "counterTestAddAndGet",
1489 args=[ main.pCounterName ],
1490 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001491 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001492 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001493 main.pCounterValue += -8
1494 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001495 # Check that counter incremented numController times
1496 pCounterResults = True
1497 for i in addedPValues:
1498 tmpResult = i in pCounters
1499 pCounterResults = pCounterResults and tmpResult
1500 if not tmpResult:
1501 main.log.error( str( i ) + " is not in partitioned "
1502 "counter incremented results" )
1503 utilities.assert_equals( expect=True,
1504 actual=pCounterResults,
1505 onpass="Default counter incremented",
1506 onfail="Error incrementing default" +
1507 " counter" )
1508
1509 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001510 pCounters = main.Cluster.command( "counterTestAddAndGet",
1511 args=[ main.pCounterName ],
1512 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001513 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001514 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001515 main.pCounterValue += 5
1516 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001517
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001518 # Check that counter incremented numController times
1519 pCounterResults = True
1520 for i in addedPValues:
1521 tmpResult = i in pCounters
1522 pCounterResults = pCounterResults and tmpResult
1523 if not tmpResult:
1524 main.log.error( str( i ) + " is not in partitioned "
1525 "counter incremented results" )
1526 utilities.assert_equals( expect=True,
1527 actual=pCounterResults,
1528 onpass="Default counter incremented",
1529 onfail="Error incrementing default" +
1530 " counter" )
1531
1532 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001533 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1534 args=[ main.pCounterName ],
1535 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001536 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001537 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001538 addedPValues.append( main.pCounterValue )
1539 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001540 # Check that counter incremented numController times
1541 pCounterResults = True
1542 for i in addedPValues:
1543 tmpResult = i in pCounters
1544 pCounterResults = pCounterResults and tmpResult
1545 if not tmpResult:
1546 main.log.error( str( i ) + " is not in partitioned "
1547 "counter incremented results" )
1548 utilities.assert_equals( expect=True,
1549 actual=pCounterResults,
1550 onpass="Default counter incremented",
1551 onfail="Error incrementing default" +
1552 " counter" )
1553
1554 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001555 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001556 utilities.assert_equals( expect=main.TRUE,
1557 actual=incrementCheck,
1558 onpass="Added counters are correct",
1559 onfail="Added counters are incorrect" )
1560
1561 # DISTRIBUTED SETS
1562 main.step( "Distributed Set get" )
1563 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001564 getResponses = main.Cluster.command( "setTestGet",
1565 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001566 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001567 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001568 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001569 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001570 current = set( getResponses[ i ] )
1571 if len( current ) == len( getResponses[ i ] ):
1572 # no repeats
1573 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001574 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001575 " has incorrect view" +
1576 " of set " + main.onosSetName + ":\n" +
1577 str( getResponses[ i ] ) )
1578 main.log.debug( "Expected: " + str( main.onosSet ) )
1579 main.log.debug( "Actual: " + str( current ) )
1580 getResults = main.FALSE
1581 else:
1582 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001583 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001584 " has repeat elements in" +
1585 " set " + main.onosSetName + ":\n" +
1586 str( getResponses[ i ] ) )
1587 getResults = main.FALSE
1588 elif getResponses[ i ] == main.ERROR:
1589 getResults = main.FALSE
1590 utilities.assert_equals( expect=main.TRUE,
1591 actual=getResults,
1592 onpass="Set elements are correct",
1593 onfail="Set elements are incorrect" )
1594
1595 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001596 sizeResponses = main.Cluster.command( "setTestSize",
1597 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001598 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001599 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001600 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001601 if size != sizeResponses[ i ]:
1602 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001603 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001604 " expected a size of " + str( size ) +
1605 " for set " + main.onosSetName +
1606 " but got " + str( sizeResponses[ i ] ) )
1607 utilities.assert_equals( expect=main.TRUE,
1608 actual=sizeResults,
1609 onpass="Set sizes are correct",
1610 onfail="Set sizes are incorrect" )
1611
1612 main.step( "Distributed Set add()" )
1613 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001614 addResponses = main.Cluster.command( "setTestAdd",
1615 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001616 # main.TRUE = successfully changed the set
1617 # main.FALSE = action resulted in no change in set
1618 # main.ERROR - Some error in executing the function
1619 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001620 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001621 if addResponses[ i ] == main.TRUE:
1622 # All is well
1623 pass
1624 elif addResponses[ i ] == main.FALSE:
1625 # Already in set, probably fine
1626 pass
1627 elif addResponses[ i ] == main.ERROR:
1628 # Error in execution
1629 addResults = main.FALSE
1630 else:
1631 # unexpected result
1632 addResults = main.FALSE
1633 if addResults != main.TRUE:
1634 main.log.error( "Error executing set add" )
1635
1636 # Check if set is still correct
1637 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001638 getResponses = main.Cluster.command( "setTestGet",
1639 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001640 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001641 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001642 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001643 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001644 current = set( getResponses[ i ] )
1645 if len( current ) == len( getResponses[ i ] ):
1646 # no repeats
1647 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001648 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001649 " of set " + main.onosSetName + ":\n" +
1650 str( getResponses[ i ] ) )
1651 main.log.debug( "Expected: " + str( main.onosSet ) )
1652 main.log.debug( "Actual: " + str( current ) )
1653 getResults = main.FALSE
1654 else:
1655 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001656 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001657 " set " + main.onosSetName + ":\n" +
1658 str( getResponses[ i ] ) )
1659 getResults = main.FALSE
1660 elif getResponses[ i ] == main.ERROR:
1661 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001662 sizeResponses = main.Cluster.command( "setTestSize",
1663 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001664 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001665 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001666 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001667 if size != sizeResponses[ i ]:
1668 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001669 main.log.error( node + " expected a size of " +
1670 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001671 " but got " + str( sizeResponses[ i ] ) )
1672 addResults = addResults and getResults and sizeResults
1673 utilities.assert_equals( expect=main.TRUE,
1674 actual=addResults,
1675 onpass="Set add correct",
1676 onfail="Set add was incorrect" )
1677
1678 main.step( "Distributed Set addAll()" )
1679 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001680 addResponses = main.Cluster.command( "setTestAdd",
1681 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001682 # main.TRUE = successfully changed the set
1683 # main.FALSE = action resulted in no change in set
1684 # main.ERROR - Some error in executing the function
1685 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001686 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001687 if addResponses[ i ] == main.TRUE:
1688 # All is well
1689 pass
1690 elif addResponses[ i ] == main.FALSE:
1691 # Already in set, probably fine
1692 pass
1693 elif addResponses[ i ] == main.ERROR:
1694 # Error in execution
1695 addAllResults = main.FALSE
1696 else:
1697 # unexpected result
1698 addAllResults = main.FALSE
1699 if addAllResults != main.TRUE:
1700 main.log.error( "Error executing set addAll" )
1701
1702 # Check if set is still correct
1703 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001704 getResponses = main.Cluster.command( "setTestGet",
1705 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001706 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001707 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001708 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001709 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001710 current = set( getResponses[ i ] )
1711 if len( current ) == len( getResponses[ i ] ):
1712 # no repeats
1713 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001714 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001715 " of set " + main.onosSetName + ":\n" +
1716 str( getResponses[ i ] ) )
1717 main.log.debug( "Expected: " + str( main.onosSet ) )
1718 main.log.debug( "Actual: " + str( current ) )
1719 getResults = main.FALSE
1720 else:
1721 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001722 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001723 " set " + main.onosSetName + ":\n" +
1724 str( getResponses[ i ] ) )
1725 getResults = main.FALSE
1726 elif getResponses[ i ] == main.ERROR:
1727 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001728 sizeResponses = main.Cluster.command( "setTestSize",
1729 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001730 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001731 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001732 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001733 if size != sizeResponses[ i ]:
1734 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001735 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001736 " for set " + main.onosSetName +
1737 " but got " + str( sizeResponses[ i ] ) )
1738 addAllResults = addAllResults and getResults and sizeResults
1739 utilities.assert_equals( expect=main.TRUE,
1740 actual=addAllResults,
1741 onpass="Set addAll correct",
1742 onfail="Set addAll was incorrect" )
1743
1744 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001745 containsResponses = main.Cluster.command( "setTestGet",
1746 args=[ main.onosSetName ],
1747 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001748 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001749 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001750 if containsResponses[ i ] == main.ERROR:
1751 containsResults = main.FALSE
1752 else:
1753 containsResults = containsResults and\
1754 containsResponses[ i ][ 1 ]
1755 utilities.assert_equals( expect=main.TRUE,
1756 actual=containsResults,
1757 onpass="Set contains is functional",
1758 onfail="Set contains failed" )
1759
1760 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001761 containsAllResponses = main.Cluster.command( "setTestGet",
1762 args=[ main.onosSetName ],
1763 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001764 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001765 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001766 if containsResponses[ i ] == main.ERROR:
1767 containsResults = main.FALSE
1768 else:
1769 containsResults = containsResults and\
1770 containsResponses[ i ][ 1 ]
1771 utilities.assert_equals( expect=main.TRUE,
1772 actual=containsAllResults,
1773 onpass="Set containsAll is functional",
1774 onfail="Set containsAll failed" )
1775
1776 main.step( "Distributed Set remove()" )
1777 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001778 removeResponses = main.Cluster.command( "setTestRemove",
1779 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001780 # main.TRUE = successfully changed the set
1781 # main.FALSE = action resulted in no change in set
1782 # main.ERROR - Some error in executing the function
1783 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001784 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001785 if removeResponses[ i ] == main.TRUE:
1786 # All is well
1787 pass
1788 elif removeResponses[ i ] == main.FALSE:
1789 # not in set, probably fine
1790 pass
1791 elif removeResponses[ i ] == main.ERROR:
1792 # Error in execution
1793 removeResults = main.FALSE
1794 else:
1795 # unexpected result
1796 removeResults = main.FALSE
1797 if removeResults != main.TRUE:
1798 main.log.error( "Error executing set remove" )
1799
1800 # Check if set is still correct
1801 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001802 getResponses = main.Cluster.command( "setTestGet",
1803 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001804 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001805 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001806 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001807 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001808 current = set( getResponses[ i ] )
1809 if len( current ) == len( getResponses[ i ] ):
1810 # no repeats
1811 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001812 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001813 " of set " + main.onosSetName + ":\n" +
1814 str( getResponses[ i ] ) )
1815 main.log.debug( "Expected: " + str( main.onosSet ) )
1816 main.log.debug( "Actual: " + str( current ) )
1817 getResults = main.FALSE
1818 else:
1819 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001820 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001821 " set " + main.onosSetName + ":\n" +
1822 str( getResponses[ i ] ) )
1823 getResults = main.FALSE
1824 elif getResponses[ i ] == main.ERROR:
1825 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001826 sizeResponses = main.Cluster.command( "setTestSize",
1827 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001828 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001829 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001830 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001831 if size != sizeResponses[ i ]:
1832 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001833 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001834 " for set " + main.onosSetName +
1835 " but got " + str( sizeResponses[ i ] ) )
1836 removeResults = removeResults and getResults and sizeResults
1837 utilities.assert_equals( expect=main.TRUE,
1838 actual=removeResults,
1839 onpass="Set remove correct",
1840 onfail="Set remove was incorrect" )
1841
1842 main.step( "Distributed Set removeAll()" )
1843 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001844 removeAllResponses = main.Cluster.command( "setTestRemove",
1845 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001846 # main.TRUE = successfully changed the set
1847 # main.FALSE = action resulted in no change in set
1848 # main.ERROR - Some error in executing the function
1849 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001850 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001851 if removeAllResponses[ i ] == main.TRUE:
1852 # All is well
1853 pass
1854 elif removeAllResponses[ i ] == main.FALSE:
1855 # not in set, probably fine
1856 pass
1857 elif removeAllResponses[ i ] == main.ERROR:
1858 # Error in execution
1859 removeAllResults = main.FALSE
1860 else:
1861 # unexpected result
1862 removeAllResults = main.FALSE
1863 if removeAllResults != main.TRUE:
1864 main.log.error( "Error executing set removeAll" )
1865
1866 # Check if set is still correct
1867 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001868 getResponses = main.Cluster.command( "setTestGet",
1869 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001870 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001871 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001872 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001873 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001874 current = set( getResponses[ i ] )
1875 if len( current ) == len( getResponses[ i ] ):
1876 # no repeats
1877 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001878 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001879 " of set " + main.onosSetName + ":\n" +
1880 str( getResponses[ i ] ) )
1881 main.log.debug( "Expected: " + str( main.onosSet ) )
1882 main.log.debug( "Actual: " + str( current ) )
1883 getResults = main.FALSE
1884 else:
1885 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001886 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001887 " set " + main.onosSetName + ":\n" +
1888 str( getResponses[ i ] ) )
1889 getResults = main.FALSE
1890 elif getResponses[ i ] == main.ERROR:
1891 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001892 sizeResponses = main.Cluster.command( "setTestSize",
1893 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001894 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001895 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001896 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001897 if size != sizeResponses[ i ]:
1898 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001899 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001900 " for set " + main.onosSetName +
1901 " but got " + str( sizeResponses[ i ] ) )
1902 removeAllResults = removeAllResults and getResults and sizeResults
1903 utilities.assert_equals( expect=main.TRUE,
1904 actual=removeAllResults,
1905 onpass="Set removeAll correct",
1906 onfail="Set removeAll was incorrect" )
1907
1908 main.step( "Distributed Set addAll()" )
1909 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001910 addResponses = main.Cluster.command( "setTestAdd",
1911 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001912 # main.TRUE = successfully changed the set
1913 # main.FALSE = action resulted in no change in set
1914 # main.ERROR - Some error in executing the function
1915 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001916 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001917 if addResponses[ i ] == main.TRUE:
1918 # All is well
1919 pass
1920 elif addResponses[ i ] == main.FALSE:
1921 # Already in set, probably fine
1922 pass
1923 elif addResponses[ i ] == main.ERROR:
1924 # Error in execution
1925 addAllResults = main.FALSE
1926 else:
1927 # unexpected result
1928 addAllResults = main.FALSE
1929 if addAllResults != main.TRUE:
1930 main.log.error( "Error executing set addAll" )
1931
1932 # Check if set is still correct
1933 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001934 getResponses = main.Cluster.command( "setTestGet",
1935 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001936 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001937 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001938 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001939 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001940 current = set( getResponses[ i ] )
1941 if len( current ) == len( getResponses[ i ] ):
1942 # no repeats
1943 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001944 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001945 " of set " + main.onosSetName + ":\n" +
1946 str( getResponses[ i ] ) )
1947 main.log.debug( "Expected: " + str( main.onosSet ) )
1948 main.log.debug( "Actual: " + str( current ) )
1949 getResults = main.FALSE
1950 else:
1951 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001952 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001953 " set " + main.onosSetName + ":\n" +
1954 str( getResponses[ i ] ) )
1955 getResults = main.FALSE
1956 elif getResponses[ i ] == main.ERROR:
1957 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001958 sizeResponses = main.Cluster.command( "setTestSize",
1959 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001960 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001961 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001962 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001963 if size != sizeResponses[ i ]:
1964 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001965 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001966 " for set " + main.onosSetName +
1967 " but got " + str( sizeResponses[ i ] ) )
1968 addAllResults = addAllResults and getResults and sizeResults
1969 utilities.assert_equals( expect=main.TRUE,
1970 actual=addAllResults,
1971 onpass="Set addAll correct",
1972 onfail="Set addAll was incorrect" )
1973
1974 main.step( "Distributed Set clear()" )
1975 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07001976 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07001977 args=[ main.onosSetName, " " ], # Values doesn't matter
1978 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001979 # main.TRUE = successfully changed the set
1980 # main.FALSE = action resulted in no change in set
1981 # main.ERROR - Some error in executing the function
1982 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001983 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001984 if clearResponses[ i ] == main.TRUE:
1985 # All is well
1986 pass
1987 elif clearResponses[ i ] == main.FALSE:
1988 # Nothing set, probably fine
1989 pass
1990 elif clearResponses[ i ] == main.ERROR:
1991 # Error in execution
1992 clearResults = main.FALSE
1993 else:
1994 # unexpected result
1995 clearResults = main.FALSE
1996 if clearResults != main.TRUE:
1997 main.log.error( "Error executing set clear" )
1998
1999 # Check if set is still correct
2000 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002001 getResponses = main.Cluster.command( "setTestGet",
2002 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002003 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002004 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002005 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002006 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002007 current = set( getResponses[ i ] )
2008 if len( current ) == len( getResponses[ i ] ):
2009 # no repeats
2010 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002011 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002012 " of set " + main.onosSetName + ":\n" +
2013 str( getResponses[ i ] ) )
2014 main.log.debug( "Expected: " + str( main.onosSet ) )
2015 main.log.debug( "Actual: " + str( current ) )
2016 getResults = main.FALSE
2017 else:
2018 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002019 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002020 " set " + main.onosSetName + ":\n" +
2021 str( getResponses[ i ] ) )
2022 getResults = main.FALSE
2023 elif getResponses[ i ] == main.ERROR:
2024 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002025 sizeResponses = main.Cluster.command( "setTestSize",
2026 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002027 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002028 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002029 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002030 if size != sizeResponses[ i ]:
2031 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002032 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002033 " for set " + main.onosSetName +
2034 " but got " + str( sizeResponses[ i ] ) )
2035 clearResults = clearResults and getResults and sizeResults
2036 utilities.assert_equals( expect=main.TRUE,
2037 actual=clearResults,
2038 onpass="Set clear correct",
2039 onfail="Set clear was incorrect" )
2040
2041 main.step( "Distributed Set addAll()" )
2042 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002043 addResponses = main.Cluster.command( "setTestAdd",
2044 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002045 # main.TRUE = successfully changed the set
2046 # main.FALSE = action resulted in no change in set
2047 # main.ERROR - Some error in executing the function
2048 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002049 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002050 if addResponses[ i ] == main.TRUE:
2051 # All is well
2052 pass
2053 elif addResponses[ i ] == main.FALSE:
2054 # Already in set, probably fine
2055 pass
2056 elif addResponses[ i ] == main.ERROR:
2057 # Error in execution
2058 addAllResults = main.FALSE
2059 else:
2060 # unexpected result
2061 addAllResults = main.FALSE
2062 if addAllResults != main.TRUE:
2063 main.log.error( "Error executing set addAll" )
2064
2065 # Check if set is still correct
2066 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002067 getResponses = main.Cluster.command( "setTestGet",
2068 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002069 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002070 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002071 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002072 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002073 current = set( getResponses[ i ] )
2074 if len( current ) == len( getResponses[ i ] ):
2075 # no repeats
2076 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002077 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002078 " of set " + main.onosSetName + ":\n" +
2079 str( getResponses[ i ] ) )
2080 main.log.debug( "Expected: " + str( main.onosSet ) )
2081 main.log.debug( "Actual: " + str( current ) )
2082 getResults = main.FALSE
2083 else:
2084 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002085 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002086 " set " + main.onosSetName + ":\n" +
2087 str( getResponses[ i ] ) )
2088 getResults = main.FALSE
2089 elif getResponses[ i ] == main.ERROR:
2090 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002091 sizeResponses = main.Cluster.command( "setTestSize",
2092 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002093 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002094 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002095 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002096 if size != sizeResponses[ i ]:
2097 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002098 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002099 " for set " + main.onosSetName +
2100 " but got " + str( sizeResponses[ i ] ) )
2101 addAllResults = addAllResults and getResults and sizeResults
2102 utilities.assert_equals( expect=main.TRUE,
2103 actual=addAllResults,
2104 onpass="Set addAll correct",
2105 onfail="Set addAll was incorrect" )
2106
2107 main.step( "Distributed Set retain()" )
2108 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002109 retainResponses = main.Cluster.command( "setTestRemove",
2110 args=[ main.onosSetName, retainValue ],
2111 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002112 # main.TRUE = successfully changed the set
2113 # main.FALSE = action resulted in no change in set
2114 # main.ERROR - Some error in executing the function
2115 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002116 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002117 if retainResponses[ i ] == main.TRUE:
2118 # All is well
2119 pass
2120 elif retainResponses[ i ] == main.FALSE:
2121 # Already in set, probably fine
2122 pass
2123 elif retainResponses[ i ] == main.ERROR:
2124 # Error in execution
2125 retainResults = main.FALSE
2126 else:
2127 # unexpected result
2128 retainResults = main.FALSE
2129 if retainResults != main.TRUE:
2130 main.log.error( "Error executing set retain" )
2131
2132 # Check if set is still correct
2133 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002134 getResponses = main.Cluster.command( "setTestGet",
2135 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002136 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002137 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002138 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002139 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002140 current = set( getResponses[ i ] )
2141 if len( current ) == len( getResponses[ i ] ):
2142 # no repeats
2143 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002144 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002145 " of set " + main.onosSetName + ":\n" +
2146 str( getResponses[ i ] ) )
2147 main.log.debug( "Expected: " + str( main.onosSet ) )
2148 main.log.debug( "Actual: " + str( current ) )
2149 getResults = main.FALSE
2150 else:
2151 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002152 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002153 " set " + main.onosSetName + ":\n" +
2154 str( getResponses[ i ] ) )
2155 getResults = main.FALSE
2156 elif getResponses[ i ] == main.ERROR:
2157 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002158 sizeResponses = main.Cluster.command( "setTestSize",
2159 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002160 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002161 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002162 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002163 if size != sizeResponses[ i ]:
2164 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002165 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002166 str( size ) + " for set " + main.onosSetName +
2167 " but got " + str( sizeResponses[ i ] ) )
2168 retainResults = retainResults and getResults and sizeResults
2169 utilities.assert_equals( expect=main.TRUE,
2170 actual=retainResults,
2171 onpass="Set retain correct",
2172 onfail="Set retain was incorrect" )
2173
2174 # Transactional maps
2175 main.step( "Partitioned Transactional maps put" )
2176 tMapValue = "Testing"
2177 numKeys = 100
2178 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002179 ctrl = main.Cluster.next()
2180 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002181 if putResponses and len( putResponses ) == 100:
2182 for i in putResponses:
2183 if putResponses[ i ][ 'value' ] != tMapValue:
2184 putResult = False
2185 else:
2186 putResult = False
2187 if not putResult:
2188 main.log.debug( "Put response values: " + str( putResponses ) )
2189 utilities.assert_equals( expect=True,
2190 actual=putResult,
2191 onpass="Partitioned Transactional Map put successful",
2192 onfail="Partitioned Transactional Map put values are incorrect" )
2193
2194 main.step( "Partitioned Transactional maps get" )
2195 # FIXME: is this sleep needed?
2196 time.sleep( 5 )
2197
2198 getCheck = True
2199 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002200 getResponses = main.Cluster.command( "transactionalMapGet",
2201 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002202 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002203 for node in getResponses:
2204 if node != tMapValue:
2205 valueCheck = False
2206 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002207 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002208 main.log.warn( getResponses )
2209 getCheck = getCheck and valueCheck
2210 utilities.assert_equals( expect=True,
2211 actual=getCheck,
2212 onpass="Partitioned Transactional Map get values were correct",
2213 onfail="Partitioned Transactional Map values incorrect" )
2214
2215 # DISTRIBUTED ATOMIC VALUE
2216 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002217 getValues = main.Cluster.command( "valueTestGet",
2218 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002219 main.log.debug( getValues )
2220 # Check the results
2221 atomicValueGetResult = True
2222 expected = valueValue if valueValue is not None else "null"
2223 main.log.debug( "Checking for value of " + expected )
2224 for i in getValues:
2225 if i != expected:
2226 atomicValueGetResult = False
2227 utilities.assert_equals( expect=True,
2228 actual=atomicValueGetResult,
2229 onpass="Atomic Value get successful",
2230 onfail="Error getting atomic Value " +
2231 str( valueValue ) + ", found: " +
2232 str( getValues ) )
2233
2234 main.step( "Atomic Value set()" )
2235 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002236 setValues = main.Cluster.command( "valueTestSet",
2237 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002238 main.log.debug( setValues )
2239 # Check the results
2240 atomicValueSetResults = True
2241 for i in setValues:
2242 if i != main.TRUE:
2243 atomicValueSetResults = False
2244 utilities.assert_equals( expect=True,
2245 actual=atomicValueSetResults,
2246 onpass="Atomic Value set successful",
2247 onfail="Error setting atomic Value" +
2248 str( setValues ) )
2249
2250 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002251 getValues = main.Cluster.command( "valueTestGet",
2252 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002253 main.log.debug( getValues )
2254 # Check the results
2255 atomicValueGetResult = True
2256 expected = valueValue if valueValue is not None else "null"
2257 main.log.debug( "Checking for value of " + expected )
2258 for i in getValues:
2259 if i != expected:
2260 atomicValueGetResult = False
2261 utilities.assert_equals( expect=True,
2262 actual=atomicValueGetResult,
2263 onpass="Atomic Value get successful",
2264 onfail="Error getting atomic Value " +
2265 str( valueValue ) + ", found: " +
2266 str( getValues ) )
2267
2268 main.step( "Atomic Value compareAndSet()" )
2269 oldValue = valueValue
2270 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002271 ctrl = main.Cluster.next()
2272 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002273 main.log.debug( CASValue )
2274 utilities.assert_equals( expect=main.TRUE,
2275 actual=CASValue,
2276 onpass="Atomic Value comapreAndSet successful",
2277 onfail="Error setting atomic Value:" +
2278 str( CASValue ) )
2279
2280 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002281 getValues = main.Cluster.command( "valueTestGet",
2282 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002283 main.log.debug( getValues )
2284 # Check the results
2285 atomicValueGetResult = True
2286 expected = valueValue if valueValue is not None else "null"
2287 main.log.debug( "Checking for value of " + expected )
2288 for i in getValues:
2289 if i != expected:
2290 atomicValueGetResult = False
2291 utilities.assert_equals( expect=True,
2292 actual=atomicValueGetResult,
2293 onpass="Atomic Value get successful",
2294 onfail="Error getting atomic Value " +
2295 str( valueValue ) + ", found: " +
2296 str( getValues ) )
2297
2298 main.step( "Atomic Value getAndSet()" )
2299 oldValue = valueValue
2300 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002301 ctrl = main.Cluster.next()
2302 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002303 main.log.debug( GASValue )
2304 expected = oldValue if oldValue is not None else "null"
2305 utilities.assert_equals( expect=expected,
2306 actual=GASValue,
2307 onpass="Atomic Value GAS successful",
2308 onfail="Error with GetAndSet atomic Value: expected " +
2309 str( expected ) + ", found: " +
2310 str( GASValue ) )
2311
2312 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002313 getValues = main.Cluster.command( "valueTestGet",
2314 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002315 main.log.debug( getValues )
2316 # Check the results
2317 atomicValueGetResult = True
2318 expected = valueValue if valueValue is not None else "null"
2319 main.log.debug( "Checking for value of " + expected )
2320 for i in getValues:
2321 if i != expected:
2322 atomicValueGetResult = False
2323 utilities.assert_equals( expect=True,
2324 actual=atomicValueGetResult,
2325 onpass="Atomic Value get successful",
2326 onfail="Error getting atomic Value: expected " +
2327 str( valueValue ) + ", found: " +
2328 str( getValues ) )
2329
2330 main.step( "Atomic Value destory()" )
2331 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002332 ctrl = main.Cluster.next()
2333 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002334 main.log.debug( destroyResult )
2335 # Check the results
2336 utilities.assert_equals( expect=main.TRUE,
2337 actual=destroyResult,
2338 onpass="Atomic Value destroy successful",
2339 onfail="Error destroying atomic Value" )
2340
2341 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002342 getValues = main.Cluster.command( "valueTestGet",
2343 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002344 main.log.debug( getValues )
2345 # Check the results
2346 atomicValueGetResult = True
2347 expected = valueValue if valueValue is not None else "null"
2348 main.log.debug( "Checking for value of " + expected )
2349 for i in getValues:
2350 if i != expected:
2351 atomicValueGetResult = False
2352 utilities.assert_equals( expect=True,
2353 actual=atomicValueGetResult,
2354 onpass="Atomic Value get successful",
2355 onfail="Error getting atomic Value " +
2356 str( valueValue ) + ", found: " +
2357 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002358
2359 # WORK QUEUES
2360 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002361 ctrl = main.Cluster.next()
2362 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002363 workQueuePending += 1
2364 main.log.debug( addResult )
2365 # Check the results
2366 utilities.assert_equals( expect=main.TRUE,
2367 actual=addResult,
2368 onpass="Work Queue add successful",
2369 onfail="Error adding to Work Queue" )
2370
2371 main.step( "Check the work queue stats" )
2372 statsResults = self.workQueueStatsCheck( workQueueName,
2373 workQueueCompleted,
2374 workQueueInProgress,
2375 workQueuePending )
2376 utilities.assert_equals( expect=True,
2377 actual=statsResults,
2378 onpass="Work Queue stats correct",
2379 onfail="Work Queue stats incorrect " )
2380
2381 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002382 ctrl = main.Cluster.next()
2383 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002384 workQueuePending += 2
2385 main.log.debug( addMultipleResult )
2386 # Check the results
2387 utilities.assert_equals( expect=main.TRUE,
2388 actual=addMultipleResult,
2389 onpass="Work Queue add multiple successful",
2390 onfail="Error adding multiple items to Work Queue" )
2391
2392 main.step( "Check the work queue stats" )
2393 statsResults = self.workQueueStatsCheck( workQueueName,
2394 workQueueCompleted,
2395 workQueueInProgress,
2396 workQueuePending )
2397 utilities.assert_equals( expect=True,
2398 actual=statsResults,
2399 onpass="Work Queue stats correct",
2400 onfail="Work Queue stats incorrect " )
2401
2402 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002403 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002404 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002405 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002406 workQueuePending -= number
2407 workQueueCompleted += number
2408 main.log.debug( take1Result )
2409 # Check the results
2410 utilities.assert_equals( expect=main.TRUE,
2411 actual=take1Result,
2412 onpass="Work Queue takeAndComplete 1 successful",
2413 onfail="Error taking 1 from Work Queue" )
2414
2415 main.step( "Check the work queue stats" )
2416 statsResults = self.workQueueStatsCheck( workQueueName,
2417 workQueueCompleted,
2418 workQueueInProgress,
2419 workQueuePending )
2420 utilities.assert_equals( expect=True,
2421 actual=statsResults,
2422 onpass="Work Queue stats correct",
2423 onfail="Work Queue stats incorrect " )
2424
2425 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002426 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002427 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002428 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002429 workQueuePending -= number
2430 workQueueCompleted += number
2431 main.log.debug( take2Result )
2432 # Check the results
2433 utilities.assert_equals( expect=main.TRUE,
2434 actual=take2Result,
2435 onpass="Work Queue takeAndComplete 2 successful",
2436 onfail="Error taking 2 from Work Queue" )
2437
2438 main.step( "Check the work queue stats" )
2439 statsResults = self.workQueueStatsCheck( workQueueName,
2440 workQueueCompleted,
2441 workQueueInProgress,
2442 workQueuePending )
2443 utilities.assert_equals( expect=True,
2444 actual=statsResults,
2445 onpass="Work Queue stats correct",
2446 onfail="Work Queue stats incorrect " )
2447
2448 main.step( "Work Queue destroy()" )
2449 valueValue = None
2450 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002451 ctrl = main.Cluster.next()
2452 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002453 workQueueCompleted = 0
2454 workQueueInProgress = 0
2455 workQueuePending = 0
2456 main.log.debug( destroyResult )
2457 # Check the results
2458 utilities.assert_equals( expect=main.TRUE,
2459 actual=destroyResult,
2460 onpass="Work Queue destroy successful",
2461 onfail="Error destroying Work Queue" )
2462
2463 main.step( "Check the work queue stats" )
2464 statsResults = self.workQueueStatsCheck( workQueueName,
2465 workQueueCompleted,
2466 workQueueInProgress,
2467 workQueuePending )
2468 utilities.assert_equals( expect=True,
2469 actual=statsResults,
2470 onpass="Work Queue stats correct",
2471 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002472 except Exception as e:
2473 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002474
2475 def cleanUp( self, main ):
2476 """
2477 Clean up
2478 """
Devin Lim58046fa2017-07-05 16:55:00 -07002479 assert main, "main not defined"
2480 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002481
2482 # printing colors to terminal
2483 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2484 'blue': '\033[94m', 'green': '\033[92m',
2485 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002486
Devin Lim58046fa2017-07-05 16:55:00 -07002487 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002488
2489 main.step( "Checking raft log size" )
2490 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2491 # get compacted periodically
2492 logCheck = main.Cluster.checkPartitionSize()
2493 utilities.assert_equals( expect=True, actual=logCheck,
2494 onpass="Raft log size is not too big",
2495 onfail="Raft logs grew too big" )
2496
Devin Lim58046fa2017-07-05 16:55:00 -07002497 main.step( "Killing tcpdumps" )
2498 main.Mininet2.stopTcpdump()
2499
2500 testname = main.TEST
2501 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2502 main.step( "Copying MN pcap and ONOS log files to test station" )
2503 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2504 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2505 # NOTE: MN Pcap file is being saved to logdir.
2506 # We scp this file as MN and TestON aren't necessarily the same vm
2507
2508 # FIXME: To be replaced with a Jenkin's post script
2509 # TODO: Load these from params
2510 # NOTE: must end in /
2511 logFolder = "/opt/onos/log/"
2512 logFiles = [ "karaf.log", "karaf.log.1" ]
2513 # NOTE: must end in /
2514 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002515 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002516 dstName = main.logdir + "/" + ctrl.name + "-" + f
2517 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002518 logFolder + f, dstName )
2519 # std*.log's
2520 # NOTE: must end in /
2521 logFolder = "/opt/onos/var/"
2522 logFiles = [ "stderr.log", "stdout.log" ]
2523 # NOTE: must end in /
2524 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002525 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002526 dstName = main.logdir + "/" + ctrl.name + "-" + f
2527 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002528 logFolder + f, dstName )
2529 else:
2530 main.log.debug( "skipping saving log files" )
2531
Jon Hall5d5876e2017-11-30 09:33:16 -08002532 main.step( "Checking ONOS Logs for errors" )
2533 for ctrl in main.Cluster.runningNodes:
2534 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2535 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
2536
Devin Lim58046fa2017-07-05 16:55:00 -07002537 main.step( "Stopping Mininet" )
2538 mnResult = main.Mininet1.stopNet()
2539 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2540 onpass="Mininet stopped",
2541 onfail="MN cleanup NOT successful" )
2542
Devin Lim58046fa2017-07-05 16:55:00 -07002543 try:
2544 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2545 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2546 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2547 timerLog.close()
2548 except NameError as e:
2549 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002550
Devin Lim58046fa2017-07-05 16:55:00 -07002551 def assignMastership( self, main ):
2552 """
2553 Assign mastership to controllers
2554 """
2555 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002556 assert main, "main not defined"
2557 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002558
2559 main.case( "Assigning Controller roles for switches" )
2560 main.caseExplanation = "Check that ONOS is connected to each " +\
2561 "device. Then manually assign" +\
2562 " mastership to specific ONOS nodes using" +\
2563 " 'device-role'"
2564 main.step( "Assign mastership of switches to specific controllers" )
2565 # Manually assign mastership to the controller we want
2566 roleCall = main.TRUE
2567
2568 ipList = []
2569 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002570 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002571 try:
2572 # Assign mastership to specific controllers. This assignment was
2573 # determined for a 7 node cluser, but will work with any sized
2574 # cluster
2575 for i in range( 1, 29 ): # switches 1 through 28
2576 # set up correct variables:
2577 if i == 1:
2578 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002579 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002580 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2581 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002582 c = 1 % main.Cluster.numCtrls
2583 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002584 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2585 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002586 c = 1 % main.Cluster.numCtrls
2587 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002588 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2589 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002590 c = 3 % main.Cluster.numCtrls
2591 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002592 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2593 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002594 c = 2 % main.Cluster.numCtrls
2595 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002596 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2597 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002598 c = 2 % main.Cluster.numCtrls
2599 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002600 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2601 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002602 c = 5 % main.Cluster.numCtrls
2603 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002604 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2605 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002606 c = 4 % main.Cluster.numCtrls
2607 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002608 dpid = '3' + str( i ).zfill( 3 )
2609 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2610 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002611 c = 6 % main.Cluster.numCtrls
2612 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002613 dpid = '6' + str( i ).zfill( 3 )
2614 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2615 elif i == 28:
2616 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002617 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002618 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2619 else:
2620 main.log.error( "You didn't write an else statement for " +
2621 "switch s" + str( i ) )
2622 roleCall = main.FALSE
2623 # Assign switch
2624 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2625 # TODO: make this controller dynamic
2626 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2627 ipList.append( ip )
2628 deviceList.append( deviceId )
2629 except ( AttributeError, AssertionError ):
2630 main.log.exception( "Something is wrong with ONOS device view" )
2631 main.log.info( onosCli.devices() )
2632 utilities.assert_equals(
2633 expect=main.TRUE,
2634 actual=roleCall,
2635 onpass="Re-assigned switch mastership to designated controller",
2636 onfail="Something wrong with deviceRole calls" )
2637
2638 main.step( "Check mastership was correctly assigned" )
2639 roleCheck = main.TRUE
2640 # NOTE: This is due to the fact that device mastership change is not
2641 # atomic and is actually a multi step process
2642 time.sleep( 5 )
2643 for i in range( len( ipList ) ):
2644 ip = ipList[ i ]
2645 deviceId = deviceList[ i ]
2646 # Check assignment
2647 master = onosCli.getRole( deviceId ).get( 'master' )
2648 if ip in master:
2649 roleCheck = roleCheck and main.TRUE
2650 else:
2651 roleCheck = roleCheck and main.FALSE
2652 main.log.error( "Error, controller " + ip + " is not" +
2653 " master " + "of device " +
2654 str( deviceId ) + ". Master is " +
2655 repr( master ) + "." )
2656 utilities.assert_equals(
2657 expect=main.TRUE,
2658 actual=roleCheck,
2659 onpass="Switches were successfully reassigned to designated " +
2660 "controller",
2661 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002662
Jon Hall5d5876e2017-11-30 09:33:16 -08002663 def bringUpStoppedNodes( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -07002664 """
Jon Hall5d5876e2017-11-30 09:33:16 -08002665 The bring up stopped nodes.
Devin Lim58046fa2017-07-05 16:55:00 -07002666 """
2667 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002668 assert main, "main not defined"
2669 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002670 assert main.kill, "main.kill not defined"
2671 main.case( "Restart minority of ONOS nodes" )
2672
2673 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2674 startResults = main.TRUE
2675 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002676 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002677 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002678 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002679 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2680 onpass="ONOS nodes started successfully",
2681 onfail="ONOS nodes NOT successfully started" )
2682
2683 main.step( "Checking if ONOS is up yet" )
2684 count = 0
2685 onosIsupResult = main.FALSE
2686 while onosIsupResult == main.FALSE and count < 10:
2687 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002688 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002689 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002690 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002691 count = count + 1
2692 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2693 onpass="ONOS restarted successfully",
2694 onfail="ONOS restart NOT successful" )
2695
Jon Hall5d5876e2017-11-30 09:33:16 -08002696 main.step( "Restarting ONOS CLI" )
Devin Lim58046fa2017-07-05 16:55:00 -07002697 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002698 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002699 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002700 ctrl.startOnosCli( ctrl.ipAddress )
2701 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002702 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002703 onpass="ONOS node(s) restarted",
2704 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002705
Jon Hall5d5876e2017-11-30 09:33:16 -08002706 # Grab the time of restart so we can have some idea of average time
Devin Lim58046fa2017-07-05 16:55:00 -07002707 main.restartTime = time.time() - restartTime
2708 main.log.debug( "Restart time: " + str( main.restartTime ) )
2709 # TODO: MAke this configurable. Also, we are breaking the above timer
2710 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08002711 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07002712 False,
Devin Lim58046fa2017-07-05 16:55:00 -07002713 sleep=15,
2714 attempts=5 )
2715
2716 utilities.assert_equals( expect=True, actual=nodeResults,
2717 onpass="Nodes check successful",
2718 onfail="Nodes check NOT successful" )
2719
2720 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002721 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002722 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002723 ctrl.name,
2724 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002725 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002726 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002727
Jon Hallca319892017-06-15 15:25:22 -07002728 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002729
2730 main.step( "Rerun for election on the node(s) that were killed" )
2731 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002732 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002733 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002734 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002735 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2736 onpass="ONOS nodes reran for election topic",
Jon Hall5d5876e2017-11-30 09:33:16 -08002737 onfail="Error rerunning for election" )
2738
2739 def upgradeNodes( self, main ):
2740 """
2741 Reinstall some nodes with an upgraded version.
2742
2743 This will reinstall nodes in main.kill with an upgraded version.
2744 """
2745 import time
2746 assert main, "main not defined"
2747 assert utilities.assert_equals, "utilities.assert_equals not defined"
2748 assert main.kill, "main.kill not defined"
2749 nodeNames = [ node.name for node in main.kill ]
2750 main.step( "Upgrading" + str( nodeNames ) + " ONOS nodes" )
2751
2752 stopResults = main.TRUE
2753 uninstallResults = main.TRUE
2754 startResults = main.TRUE
2755 sshResults = main.TRUE
2756 isup = main.TRUE
2757 restartTime = time.time()
2758 for ctrl in main.kill:
2759 stopResults = stopResults and\
2760 ctrl.onosStop( ctrl.ipAddress )
2761 uninstallResults = uninstallResults and\
2762 ctrl.onosUninstall( ctrl.ipAddress )
2763 # Install the new version of onos
2764 startResults = startResults and\
2765 ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
2766 sshResults = sshResults and\
2767 ctrl.onosSecureSSH( node=ctrl.ipAddress )
2768 isup = isup and ctrl.isup( ctrl.ipAddress )
2769 utilities.assert_equals( expect=main.TRUE, actual=stopResults,
2770 onpass="ONOS nodes stopped successfully",
2771 onfail="ONOS nodes NOT successfully stopped" )
2772 utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
2773 onpass="ONOS nodes uninstalled successfully",
2774 onfail="ONOS nodes NOT successfully uninstalled" )
2775 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2776 onpass="ONOS nodes started successfully",
2777 onfail="ONOS nodes NOT successfully started" )
2778 utilities.assert_equals( expect=main.TRUE, actual=sshResults,
2779 onpass="Successfully secured onos ssh",
2780 onfail="Failed to secure onos ssh" )
2781 utilities.assert_equals( expect=main.TRUE, actual=isup,
2782 onpass="ONOS nodes fully started",
2783 onfail="ONOS nodes NOT fully started" )
2784
2785 main.step( "Restarting ONOS CLI" )
2786 cliResults = main.TRUE
2787 for ctrl in main.kill:
2788 cliResults = cliResults and\
2789 ctrl.startOnosCli( ctrl.ipAddress )
2790 ctrl.active = True
2791 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
2792 onpass="ONOS node(s) restarted",
2793 onfail="ONOS node(s) did not restart" )
2794
2795 # Grab the time of restart so we can have some idea of average time
2796 main.restartTime = time.time() - restartTime
2797 main.log.debug( "Restart time: " + str( main.restartTime ) )
2798 # TODO: Make this configurable.
2799 main.step( "Checking ONOS nodes" )
2800 nodeResults = utilities.retry( main.Cluster.nodesCheck,
2801 False,
2802 sleep=15,
2803 attempts=5 )
2804
2805 utilities.assert_equals( expect=True, actual=nodeResults,
2806 onpass="Nodes check successful",
2807 onfail="Nodes check NOT successful" )
2808
2809 if not nodeResults:
2810 for ctrl in main.Cluster.active():
2811 main.log.debug( "{} components not ACTIVE: \n{}".format(
2812 ctrl.name,
2813 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
2814 main.log.error( "Failed to start ONOS, stopping test" )
2815 main.cleanAndExit()
2816
2817 self.commonChecks()
2818
2819 main.step( "Rerun for election on the node(s) that were killed" )
2820 runResults = main.TRUE
2821 for ctrl in main.kill:
2822 runResults = runResults and\
2823 ctrl.electionTestRun()
2824 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2825 onpass="ONOS nodes reran for election topic",
2826 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002827
Devin Lim142b5342017-07-20 15:22:39 -07002828 def tempCell( self, cellName, ipList ):
2829 main.step( "Create cell file" )
2830 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002831
Devin Lim142b5342017-07-20 15:22:39 -07002832 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2833 main.Mininet1.ip_address,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002834 cellAppString, ipList, main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002835 main.step( "Applying cell variable to environment" )
2836 cellResult = main.ONOSbench.setCell( cellName )
2837 verifyResult = main.ONOSbench.verifyCell()
2838
Devin Lim142b5342017-07-20 15:22:39 -07002839 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002840 """
2841 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002842 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002843 1: scaling
2844 """
2845 """
2846 Check state after ONOS failure/scaling
2847 """
2848 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002849 assert main, "main not defined"
2850 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002851 main.case( "Running ONOS Constant State Tests" )
2852
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002853 OnosAfterWhich = [ "failure", "scaliing" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002854
Devin Lim58046fa2017-07-05 16:55:00 -07002855 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002856 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002857
Devin Lim142b5342017-07-20 15:22:39 -07002858 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002859 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002860
2861 if rolesResults and not consistentMastership:
2862 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002863 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002864 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002865 json.dumps( json.loads( ONOSMastership[ i ] ),
2866 sort_keys=True,
2867 indent=4,
2868 separators=( ',', ': ' ) ) )
2869
2870 if compareSwitch:
2871 description2 = "Compare switch roles from before failure"
2872 main.step( description2 )
2873 try:
2874 currentJson = json.loads( ONOSMastership[ 0 ] )
2875 oldJson = json.loads( mastershipState )
2876 except ( ValueError, TypeError ):
2877 main.log.exception( "Something is wrong with parsing " +
2878 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002879 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2880 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002881 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002882 mastershipCheck = main.TRUE
2883 for i in range( 1, 29 ):
2884 switchDPID = str(
2885 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2886 current = [ switch[ 'master' ] for switch in currentJson
2887 if switchDPID in switch[ 'id' ] ]
2888 old = [ switch[ 'master' ] for switch in oldJson
2889 if switchDPID in switch[ 'id' ] ]
2890 if current == old:
2891 mastershipCheck = mastershipCheck and main.TRUE
2892 else:
2893 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2894 mastershipCheck = main.FALSE
2895 utilities.assert_equals(
2896 expect=main.TRUE,
2897 actual=mastershipCheck,
2898 onpass="Mastership of Switches was not changed",
2899 onfail="Mastership of some switches changed" )
2900
2901 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002902 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002903 intentCheck = main.FALSE
2904 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002905
2906 main.step( "Check for consistency in Intents from each controller" )
2907 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2908 main.log.info( "Intents are consistent across all ONOS " +
2909 "nodes" )
2910 else:
2911 consistentIntents = False
2912
2913 # Try to make it easy to figure out what is happening
2914 #
2915 # Intent ONOS1 ONOS2 ...
2916 # 0x01 INSTALLED INSTALLING
2917 # ... ... ...
2918 # ... ... ...
2919 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002920 for ctrl in main.Cluster.active():
2921 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002922 main.log.warn( title )
2923 # get all intent keys in the cluster
2924 keys = []
2925 for nodeStr in ONOSIntents:
2926 node = json.loads( nodeStr )
2927 for intent in node:
2928 keys.append( intent.get( 'id' ) )
2929 keys = set( keys )
2930 for key in keys:
2931 row = "%-13s" % key
2932 for nodeStr in ONOSIntents:
2933 node = json.loads( nodeStr )
2934 for intent in node:
2935 if intent.get( 'id' ) == key:
2936 row += "%-15s" % intent.get( 'state' )
2937 main.log.warn( row )
2938 # End table view
2939
2940 utilities.assert_equals(
2941 expect=True,
2942 actual=consistentIntents,
2943 onpass="Intents are consistent across all ONOS nodes",
2944 onfail="ONOS nodes have different views of intents" )
2945 intentStates = []
2946 for node in ONOSIntents: # Iter through ONOS nodes
2947 nodeStates = []
2948 # Iter through intents of a node
2949 try:
2950 for intent in json.loads( node ):
2951 nodeStates.append( intent[ 'state' ] )
2952 except ( ValueError, TypeError ):
2953 main.log.exception( "Error in parsing intents" )
2954 main.log.error( repr( node ) )
2955 intentStates.append( nodeStates )
2956 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2957 main.log.info( dict( out ) )
2958
2959 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002960 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002961 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002962 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002963 main.log.warn( json.dumps(
2964 json.loads( ONOSIntents[ i ] ),
2965 sort_keys=True,
2966 indent=4,
2967 separators=( ',', ': ' ) ) )
2968 elif intentsResults and consistentIntents:
2969 intentCheck = main.TRUE
2970
2971 # NOTE: Store has no durability, so intents are lost across system
2972 # restarts
2973 if not isRestart:
2974 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2975 # NOTE: this requires case 5 to pass for intentState to be set.
2976 # maybe we should stop the test if that fails?
2977 sameIntents = main.FALSE
2978 try:
2979 intentState
2980 except NameError:
2981 main.log.warn( "No previous intent state was saved" )
2982 else:
2983 if intentState and intentState == ONOSIntents[ 0 ]:
2984 sameIntents = main.TRUE
2985 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2986 # TODO: possibly the states have changed? we may need to figure out
2987 # what the acceptable states are
2988 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2989 sameIntents = main.TRUE
2990 try:
2991 before = json.loads( intentState )
2992 after = json.loads( ONOSIntents[ 0 ] )
2993 for intent in before:
2994 if intent not in after:
2995 sameIntents = main.FALSE
2996 main.log.debug( "Intent is not currently in ONOS " +
2997 "(at least in the same form):" )
2998 main.log.debug( json.dumps( intent ) )
2999 except ( ValueError, TypeError ):
3000 main.log.exception( "Exception printing intents" )
3001 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3002 main.log.debug( repr( intentState ) )
3003 if sameIntents == main.FALSE:
3004 try:
3005 main.log.debug( "ONOS intents before: " )
3006 main.log.debug( json.dumps( json.loads( intentState ),
3007 sort_keys=True, indent=4,
3008 separators=( ',', ': ' ) ) )
3009 main.log.debug( "Current ONOS intents: " )
3010 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
3011 sort_keys=True, indent=4,
3012 separators=( ',', ': ' ) ) )
3013 except ( ValueError, TypeError ):
3014 main.log.exception( "Exception printing intents" )
3015 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3016 main.log.debug( repr( intentState ) )
3017 utilities.assert_equals(
3018 expect=main.TRUE,
3019 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003020 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07003021 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
3022 intentCheck = intentCheck and sameIntents
3023
3024 main.step( "Get the OF Table entries and compare to before " +
3025 "component " + OnosAfterWhich[ afterWhich ] )
3026 FlowTables = main.TRUE
3027 for i in range( 28 ):
3028 main.log.info( "Checking flow table on s" + str( i + 1 ) )
3029 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
3030 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
3031 FlowTables = FlowTables and curSwitch
3032 if curSwitch == main.FALSE:
3033 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
3034 utilities.assert_equals(
3035 expect=main.TRUE,
3036 actual=FlowTables,
3037 onpass="No changes were found in the flow tables",
3038 onfail="Changes were found in the flow tables" )
3039
Jon Hallca319892017-06-15 15:25:22 -07003040 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003041 """
3042 main.step( "Check the continuous pings to ensure that no packets " +
3043 "were dropped during component failure" )
3044 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3045 main.params[ 'TESTONIP' ] )
3046 LossInPings = main.FALSE
3047 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3048 for i in range( 8, 18 ):
3049 main.log.info(
3050 "Checking for a loss in pings along flow from s" +
3051 str( i ) )
3052 LossInPings = main.Mininet2.checkForLoss(
3053 "/tmp/ping.h" +
3054 str( i ) ) or LossInPings
3055 if LossInPings == main.TRUE:
3056 main.log.info( "Loss in ping detected" )
3057 elif LossInPings == main.ERROR:
3058 main.log.info( "There are multiple mininet process running" )
3059 elif LossInPings == main.FALSE:
3060 main.log.info( "No Loss in the pings" )
3061 main.log.info( "No loss of dataplane connectivity" )
3062 utilities.assert_equals(
3063 expect=main.FALSE,
3064 actual=LossInPings,
3065 onpass="No Loss of connectivity",
3066 onfail="Loss of dataplane connectivity detected" )
3067 # NOTE: Since intents are not persisted with IntnentStore,
3068 # we expect loss in dataplane connectivity
3069 LossInPings = main.FALSE
3070 """
Devin Lim58046fa2017-07-05 16:55:00 -07003071 def compareTopo( self, main ):
3072 """
3073 Compare topo
3074 """
3075 import json
3076 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003077 assert main, "main not defined"
3078 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003079 try:
3080 from tests.dependencies.topology import Topology
3081 except ImportError:
3082 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003083 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003084 try:
3085 main.topoRelated
3086 except ( NameError, AttributeError ):
3087 main.topoRelated = Topology()
3088 main.case( "Compare ONOS Topology view to Mininet topology" )
3089 main.caseExplanation = "Compare topology objects between Mininet" +\
3090 " and ONOS"
3091 topoResult = main.FALSE
3092 topoFailMsg = "ONOS topology don't match Mininet"
3093 elapsed = 0
3094 count = 0
3095 main.step( "Comparing ONOS topology to MN topology" )
3096 startTime = time.time()
3097 # Give time for Gossip to work
3098 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3099 devicesResults = main.TRUE
3100 linksResults = main.TRUE
3101 hostsResults = main.TRUE
3102 hostAttachmentResults = True
3103 count += 1
3104 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003105 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003106 kwargs={ 'sleep': 5, 'attempts': 5,
3107 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003108 ipResult = main.TRUE
3109
Devin Lim142b5342017-07-20 15:22:39 -07003110 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003111 kwargs={ 'sleep': 5, 'attempts': 5,
3112 'randomTime': True },
3113 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003114
3115 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003116 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003117 if hosts[ controller ]:
3118 for host in hosts[ controller ]:
3119 if host is None or host.get( 'ipAddresses', [] ) == []:
3120 main.log.error(
3121 "Error with host ipAddresses on controller" +
3122 controllerStr + ": " + str( host ) )
3123 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003124 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003125 kwargs={ 'sleep': 5, 'attempts': 5,
3126 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003127 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003128 kwargs={ 'sleep': 5, 'attempts': 5,
3129 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003130 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003131 kwargs={ 'sleep': 5, 'attempts': 5,
3132 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003133
3134 elapsed = time.time() - startTime
3135 cliTime = time.time() - cliStart
Jon Hall5d5876e2017-11-30 09:33:16 -08003136 main.log.debug( "Elapsed time: " + str( elapsed ) )
3137 main.log.debug( "CLI time: " + str( cliTime ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003138
3139 if all( e is None for e in devices ) and\
3140 all( e is None for e in hosts ) and\
3141 all( e is None for e in ports ) and\
3142 all( e is None for e in links ) and\
3143 all( e is None for e in clusters ):
3144 topoFailMsg = "Could not get topology from ONOS"
3145 main.log.error( topoFailMsg )
3146 continue # Try again, No use trying to compare
3147
3148 mnSwitches = main.Mininet1.getSwitches()
3149 mnLinks = main.Mininet1.getLinks()
3150 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003151 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003152 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003153 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3154 controller,
3155 mnSwitches,
3156 devices,
3157 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003158 utilities.assert_equals( expect=main.TRUE,
3159 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003160 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003161 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003162 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003163 " Switches view is incorrect" )
3164
Devin Lim58046fa2017-07-05 16:55:00 -07003165 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003166 main.Mininet1.compareLinks,
3167 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003168 utilities.assert_equals( expect=main.TRUE,
3169 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003170 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003171 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003172 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003173 " links view is incorrect" )
3174 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3175 currentHostsResult = main.Mininet1.compareHosts(
3176 mnHosts,
3177 hosts[ controller ] )
3178 elif hosts[ controller ] == []:
3179 currentHostsResult = main.TRUE
3180 else:
3181 currentHostsResult = main.FALSE
3182 utilities.assert_equals( expect=main.TRUE,
3183 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003184 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003185 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003186 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003187 " hosts don't match Mininet" )
3188 # CHECKING HOST ATTACHMENT POINTS
3189 hostAttachment = True
3190 zeroHosts = False
3191 # FIXME: topo-HA/obelisk specific mappings:
3192 # key is mac and value is dpid
3193 mappings = {}
3194 for i in range( 1, 29 ): # hosts 1 through 28
3195 # set up correct variables:
3196 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3197 if i == 1:
3198 deviceId = "1000".zfill( 16 )
3199 elif i == 2:
3200 deviceId = "2000".zfill( 16 )
3201 elif i == 3:
3202 deviceId = "3000".zfill( 16 )
3203 elif i == 4:
3204 deviceId = "3004".zfill( 16 )
3205 elif i == 5:
3206 deviceId = "5000".zfill( 16 )
3207 elif i == 6:
3208 deviceId = "6000".zfill( 16 )
3209 elif i == 7:
3210 deviceId = "6007".zfill( 16 )
3211 elif i >= 8 and i <= 17:
3212 dpid = '3' + str( i ).zfill( 3 )
3213 deviceId = dpid.zfill( 16 )
3214 elif i >= 18 and i <= 27:
3215 dpid = '6' + str( i ).zfill( 3 )
3216 deviceId = dpid.zfill( 16 )
3217 elif i == 28:
3218 deviceId = "2800".zfill( 16 )
3219 mappings[ macId ] = deviceId
3220 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3221 if hosts[ controller ] == []:
3222 main.log.warn( "There are no hosts discovered" )
3223 zeroHosts = True
3224 else:
3225 for host in hosts[ controller ]:
3226 mac = None
3227 location = None
3228 device = None
3229 port = None
3230 try:
3231 mac = host.get( 'mac' )
3232 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003233 print host
3234 if 'locations' in host:
3235 location = host.get( 'locations' )[ 0 ]
3236 elif 'location' in host:
3237 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003238 assert location, "location field could not be found for this host object"
3239
3240 # Trim the protocol identifier off deviceId
3241 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3242 assert device, "elementId field could not be found for this host location object"
3243
3244 port = location.get( 'port' )
3245 assert port, "port field could not be found for this host location object"
3246
3247 # Now check if this matches where they should be
3248 if mac and device and port:
3249 if str( port ) != "1":
3250 main.log.error( "The attachment port is incorrect for " +
3251 "host " + str( mac ) +
3252 ". Expected: 1 Actual: " + str( port ) )
3253 hostAttachment = False
3254 if device != mappings[ str( mac ) ]:
3255 main.log.error( "The attachment device is incorrect for " +
3256 "host " + str( mac ) +
3257 ". Expected: " + mappings[ str( mac ) ] +
3258 " Actual: " + device )
3259 hostAttachment = False
3260 else:
3261 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003262 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003263 main.log.exception( "Json object not as expected" )
3264 main.log.error( repr( host ) )
3265 hostAttachment = False
3266 else:
3267 main.log.error( "No hosts json output or \"Error\"" +
3268 " in output. hosts = " +
3269 repr( hosts[ controller ] ) )
3270 if zeroHosts is False:
3271 # TODO: Find a way to know if there should be hosts in a
3272 # given point of the test
3273 hostAttachment = True
3274
3275 # END CHECKING HOST ATTACHMENT POINTS
3276 devicesResults = devicesResults and currentDevicesResult
3277 linksResults = linksResults and currentLinksResult
3278 hostsResults = hostsResults and currentHostsResult
3279 hostAttachmentResults = hostAttachmentResults and\
3280 hostAttachment
3281 topoResult = ( devicesResults and linksResults
3282 and hostsResults and ipResult and
3283 hostAttachmentResults )
3284 utilities.assert_equals( expect=True,
3285 actual=topoResult,
3286 onpass="ONOS topology matches Mininet",
3287 onfail=topoFailMsg )
3288 # End of While loop to pull ONOS state
3289
3290 # Compare json objects for hosts and dataplane clusters
3291
3292 # hosts
3293 main.step( "Hosts view is consistent across all ONOS nodes" )
3294 consistentHostsResult = main.TRUE
3295 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003296 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003297 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3298 if hosts[ controller ] == hosts[ 0 ]:
3299 continue
3300 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003301 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003302 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003303 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003304 consistentHostsResult = main.FALSE
3305
3306 else:
Jon Hallca319892017-06-15 15:25:22 -07003307 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003308 controllerStr )
3309 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003310 main.log.debug( controllerStr +
3311 " hosts response: " +
3312 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003313 utilities.assert_equals(
3314 expect=main.TRUE,
3315 actual=consistentHostsResult,
3316 onpass="Hosts view is consistent across all ONOS nodes",
3317 onfail="ONOS nodes have different views of hosts" )
3318
3319 main.step( "Hosts information is correct" )
3320 hostsResults = hostsResults and ipResult
3321 utilities.assert_equals(
3322 expect=main.TRUE,
3323 actual=hostsResults,
3324 onpass="Host information is correct",
3325 onfail="Host information is incorrect" )
3326
3327 main.step( "Host attachment points to the network" )
3328 utilities.assert_equals(
3329 expect=True,
3330 actual=hostAttachmentResults,
3331 onpass="Hosts are correctly attached to the network",
3332 onfail="ONOS did not correctly attach hosts to the network" )
3333
3334 # Strongly connected clusters of devices
3335 main.step( "Clusters view is consistent across all ONOS nodes" )
3336 consistentClustersResult = main.TRUE
3337 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003338 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003339 if "Error" not in clusters[ controller ]:
3340 if clusters[ controller ] == clusters[ 0 ]:
3341 continue
3342 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003343 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003344 controllerStr +
3345 " is inconsistent with ONOS1" )
3346 consistentClustersResult = main.FALSE
3347 else:
3348 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003349 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003350 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003351 main.log.debug( controllerStr +
3352 " clusters response: " +
3353 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003354 utilities.assert_equals(
3355 expect=main.TRUE,
3356 actual=consistentClustersResult,
3357 onpass="Clusters view is consistent across all ONOS nodes",
3358 onfail="ONOS nodes have different views of clusters" )
3359 if not consistentClustersResult:
3360 main.log.debug( clusters )
3361 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003362 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003363
3364 main.step( "There is only one SCC" )
3365 # there should always only be one cluster
3366 try:
3367 numClusters = len( json.loads( clusters[ 0 ] ) )
3368 except ( ValueError, TypeError ):
3369 main.log.exception( "Error parsing clusters[0]: " +
3370 repr( clusters[ 0 ] ) )
3371 numClusters = "ERROR"
3372 clusterResults = main.FALSE
3373 if numClusters == 1:
3374 clusterResults = main.TRUE
3375 utilities.assert_equals(
3376 expect=1,
3377 actual=numClusters,
3378 onpass="ONOS shows 1 SCC",
3379 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3380
3381 topoResult = ( devicesResults and linksResults
3382 and hostsResults and consistentHostsResult
3383 and consistentClustersResult and clusterResults
3384 and ipResult and hostAttachmentResults )
3385
3386 topoResult = topoResult and int( count <= 2 )
3387 note = "note it takes about " + str( int( cliTime ) ) + \
3388 " seconds for the test to make all the cli calls to fetch " +\
3389 "the topology from each ONOS instance"
3390 main.log.info(
3391 "Very crass estimate for topology discovery/convergence( " +
3392 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3393 str( count ) + " tries" )
3394
3395 main.step( "Device information is correct" )
3396 utilities.assert_equals(
3397 expect=main.TRUE,
3398 actual=devicesResults,
3399 onpass="Device information is correct",
3400 onfail="Device information is incorrect" )
3401
3402 main.step( "Links are correct" )
3403 utilities.assert_equals(
3404 expect=main.TRUE,
3405 actual=linksResults,
3406 onpass="Link are correct",
3407 onfail="Links are incorrect" )
3408
3409 main.step( "Hosts are correct" )
3410 utilities.assert_equals(
3411 expect=main.TRUE,
3412 actual=hostsResults,
3413 onpass="Hosts are correct",
3414 onfail="Hosts are incorrect" )
3415
3416 # FIXME: move this to an ONOS state case
3417 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003418 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003419 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003420 attempts=5 )
3421 utilities.assert_equals( expect=True, actual=nodeResults,
3422 onpass="Nodes check successful",
3423 onfail="Nodes check NOT successful" )
3424 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003425 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003426 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003427 ctrl.name,
3428 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003429
3430 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003431 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003432
Devin Lim58046fa2017-07-05 16:55:00 -07003433 def linkDown( self, main, fromS="s3", toS="s28" ):
3434 """
3435 Link fromS-toS down
3436 """
3437 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003438 assert main, "main not defined"
3439 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003440 # NOTE: You should probably run a topology check after this
3441
3442 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3443
3444 description = "Turn off a link to ensure that Link Discovery " +\
3445 "is working properly"
3446 main.case( description )
3447
3448 main.step( "Kill Link between " + fromS + " and " + toS )
3449 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3450 main.log.info( "Waiting " + str( linkSleep ) +
3451 " seconds for link down to be discovered" )
3452 time.sleep( linkSleep )
3453 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3454 onpass="Link down successful",
3455 onfail="Failed to bring link down" )
3456 # TODO do some sort of check here
3457
3458 def linkUp( self, main, fromS="s3", toS="s28" ):
3459 """
3460 Link fromS-toS up
3461 """
3462 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003463 assert main, "main not defined"
3464 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003465 # NOTE: You should probably run a topology check after this
3466
3467 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3468
3469 description = "Restore a link to ensure that Link Discovery is " + \
3470 "working properly"
3471 main.case( description )
3472
Jon Hall4173b242017-09-12 17:04:38 -07003473 main.step( "Bring link between " + fromS + " and " + toS + " back up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003474 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3475 main.log.info( "Waiting " + str( linkSleep ) +
3476 " seconds for link up to be discovered" )
3477 time.sleep( linkSleep )
3478 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3479 onpass="Link up successful",
3480 onfail="Failed to bring link up" )
3481
3482 def switchDown( self, main ):
3483 """
3484 Switch Down
3485 """
3486 # NOTE: You should probably run a topology check after this
3487 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003488 assert main, "main not defined"
3489 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003490
3491 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3492
3493 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003494 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003495 main.case( description )
3496 switch = main.params[ 'kill' ][ 'switch' ]
3497 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3498
3499 # TODO: Make this switch parameterizable
3500 main.step( "Kill " + switch )
3501 main.log.info( "Deleting " + switch )
3502 main.Mininet1.delSwitch( switch )
3503 main.log.info( "Waiting " + str( switchSleep ) +
3504 " seconds for switch down to be discovered" )
3505 time.sleep( switchSleep )
3506 device = onosCli.getDevice( dpid=switchDPID )
3507 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003508 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003509 result = main.FALSE
3510 if device and device[ 'available' ] is False:
3511 result = main.TRUE
3512 utilities.assert_equals( expect=main.TRUE, actual=result,
3513 onpass="Kill switch successful",
3514 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003515
Devin Lim58046fa2017-07-05 16:55:00 -07003516 def switchUp( self, main ):
3517 """
3518 Switch Up
3519 """
3520 # NOTE: You should probably run a topology check after this
3521 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003522 assert main, "main not defined"
3523 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003524
3525 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3526 switch = main.params[ 'kill' ][ 'switch' ]
3527 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3528 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003529 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003530 description = "Adding a switch to ensure it is discovered correctly"
3531 main.case( description )
3532
3533 main.step( "Add back " + switch )
3534 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3535 for peer in links:
3536 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003537 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003538 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3539 main.log.info( "Waiting " + str( switchSleep ) +
3540 " seconds for switch up to be discovered" )
3541 time.sleep( switchSleep )
3542 device = onosCli.getDevice( dpid=switchDPID )
3543 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003544 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003545 result = main.FALSE
3546 if device and device[ 'available' ]:
3547 result = main.TRUE
3548 utilities.assert_equals( expect=main.TRUE, actual=result,
3549 onpass="add switch successful",
3550 onfail="Failed to add switch?" )
3551
3552 def startElectionApp( self, main ):
3553 """
3554 start election app on all onos nodes
3555 """
Devin Lim58046fa2017-07-05 16:55:00 -07003556 assert main, "main not defined"
3557 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003558
3559 main.case( "Start Leadership Election app" )
3560 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003561 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003562 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003563 utilities.assert_equals(
3564 expect=main.TRUE,
3565 actual=appResult,
3566 onpass="Election app installed",
3567 onfail="Something went wrong with installing Leadership election" )
3568
3569 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003570 onosCli.electionTestRun()
3571 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003572 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003573 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003574 utilities.assert_equals(
3575 expect=True,
3576 actual=sameResult,
3577 onpass="All nodes see the same leaderboards",
3578 onfail="Inconsistent leaderboards" )
3579
3580 if sameResult:
Jon Hall5d5876e2017-11-30 09:33:16 -08003581 # Check that the leader is one of the active nodes
3582 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003583 leader = leaders[ 0 ][ 0 ]
Jon Hall5d5876e2017-11-30 09:33:16 -08003584 if leader in ips:
3585 legitimate = True
Devin Lim58046fa2017-07-05 16:55:00 -07003586 else:
Jon Hall5d5876e2017-11-30 09:33:16 -08003587 legitimate = False
3588 main.log.debug( leaders )
3589 main.step( "Active node was elected leader?" )
Devin Lim58046fa2017-07-05 16:55:00 -07003590 utilities.assert_equals(
3591 expect=True,
Jon Hall5d5876e2017-11-30 09:33:16 -08003592 actual=legitimate,
Devin Lim58046fa2017-07-05 16:55:00 -07003593 onpass="Correct leader was elected",
3594 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003595 main.Cluster.testLeader = leader
3596
Devin Lim58046fa2017-07-05 16:55:00 -07003597 def isElectionFunctional( self, main ):
3598 """
3599 Check that Leadership Election is still functional
3600 15.1 Run election on each node
3601 15.2 Check that each node has the same leaders and candidates
3602 15.3 Find current leader and withdraw
3603 15.4 Check that a new node was elected leader
3604 15.5 Check that that new leader was the candidate of old leader
3605 15.6 Run for election on old leader
3606 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3607 15.8 Make sure that the old leader was added to the candidate list
3608
3609 old and new variable prefixes refer to data from before vs after
3610 withdrawl and later before withdrawl vs after re-election
3611 """
3612 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003613 assert main, "main not defined"
3614 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003615
3616 description = "Check that Leadership Election is still functional"
3617 main.case( description )
3618 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3619
3620 oldLeaders = [] # list of lists of each nodes' candidates before
3621 newLeaders = [] # list of lists of each nodes' candidates after
3622 oldLeader = '' # the old leader from oldLeaders, None if not same
3623 newLeader = '' # the new leaders fron newLoeaders, None if not same
3624 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3625 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003626 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003627 expectNoLeader = True
3628
3629 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003630 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003631 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003632 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003633 actual=electionResult,
3634 onpass="All nodes successfully ran for leadership",
3635 onfail="At least one node failed to run for leadership" )
3636
3637 if electionResult == main.FALSE:
3638 main.log.error(
3639 "Skipping Test Case because Election Test App isn't loaded" )
3640 main.skipCase()
3641
3642 main.step( "Check that each node shows the same leader and candidates" )
3643 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003644 activeCLIs = main.Cluster.active()
3645 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003646 if sameResult:
3647 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003648 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003649 else:
3650 oldLeader = None
3651 utilities.assert_equals(
3652 expect=True,
3653 actual=sameResult,
3654 onpass="Leaderboards are consistent for the election topic",
3655 onfail=failMessage )
3656
3657 main.step( "Find current leader and withdraw" )
3658 withdrawResult = main.TRUE
3659 # do some sanity checking on leader before using it
3660 if oldLeader is None:
3661 main.log.error( "Leadership isn't consistent." )
3662 withdrawResult = main.FALSE
3663 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003664 for ctrl in main.Cluster.active():
3665 if oldLeader == ctrl.ipAddress:
3666 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003667 break
3668 else: # FOR/ELSE statement
3669 main.log.error( "Leader election, could not find current leader" )
3670 if oldLeader:
3671 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3672 utilities.assert_equals(
3673 expect=main.TRUE,
3674 actual=withdrawResult,
3675 onpass="Node was withdrawn from election",
3676 onfail="Node was not withdrawn from election" )
3677
3678 main.step( "Check that a new node was elected leader" )
3679 failMessage = "Nodes have different leaders"
3680 # Get new leaders and candidates
3681 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3682 newLeader = None
3683 if newLeaderResult:
3684 if newLeaders[ 0 ][ 0 ] == 'none':
3685 main.log.error( "No leader was elected on at least 1 node" )
3686 if not expectNoLeader:
3687 newLeaderResult = False
3688 newLeader = newLeaders[ 0 ][ 0 ]
3689
3690 # Check that the new leader is not the older leader, which was withdrawn
3691 if newLeader == oldLeader:
3692 newLeaderResult = False
3693 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3694 " as the current leader" )
3695 utilities.assert_equals(
3696 expect=True,
3697 actual=newLeaderResult,
3698 onpass="Leadership election passed",
3699 onfail="Something went wrong with Leadership election" )
3700
3701 main.step( "Check that that new leader was the candidate of old leader" )
3702 # candidates[ 2 ] should become the top candidate after withdrawl
3703 correctCandidateResult = main.TRUE
3704 if expectNoLeader:
3705 if newLeader == 'none':
3706 main.log.info( "No leader expected. None found. Pass" )
3707 correctCandidateResult = main.TRUE
3708 else:
3709 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3710 correctCandidateResult = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07003711 utilities.assert_equals(
3712 expect=main.TRUE,
3713 actual=correctCandidateResult,
3714 onpass="Correct Candidate Elected",
3715 onfail="Incorrect Candidate Elected" )
3716
3717 main.step( "Run for election on old leader( just so everyone " +
3718 "is in the hat )" )
3719 if oldLeaderCLI is not None:
3720 runResult = oldLeaderCLI.electionTestRun()
3721 else:
3722 main.log.error( "No old leader to re-elect" )
3723 runResult = main.FALSE
3724 utilities.assert_equals(
3725 expect=main.TRUE,
3726 actual=runResult,
3727 onpass="App re-ran for election",
3728 onfail="App failed to run for election" )
3729
3730 main.step(
3731 "Check that oldLeader is a candidate, and leader if only 1 node" )
3732 # verify leader didn't just change
3733 # Get new leaders and candidates
3734 reRunLeaders = []
3735 time.sleep( 5 ) # Paremterize
3736 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3737
Devin Lim58046fa2017-07-05 16:55:00 -07003738 def installDistributedPrimitiveApp( self, main ):
Jon Hall5d5876e2017-11-30 09:33:16 -08003739 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003740 Install Distributed Primitives app
Jon Hall5d5876e2017-11-30 09:33:16 -08003741 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003742 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003743 assert main, "main not defined"
3744 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003745
3746 # Variables for the distributed primitives tests
3747 main.pCounterName = "TestON-Partitions"
3748 main.pCounterValue = 0
3749 main.onosSet = set( [] )
3750 main.onosSetName = "TestON-set"
3751
3752 description = "Install Primitives app"
3753 main.case( description )
3754 main.step( "Install Primitives app" )
3755 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003756 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003757 utilities.assert_equals( expect=main.TRUE,
3758 actual=appResults,
3759 onpass="Primitives app activated",
3760 onfail="Primitives app not activated" )
3761 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003762 time.sleep( 5 ) # To allow all nodes to activate
Jon Halla478b852017-12-04 15:00:15 -08003763
    def upgradeInit( self, main ):
        '''
        Initiates an update

        Sends the ISSU ( In-Service Software Upgrade ) init command to an
        ONOS node, logs the reported upgrade status, then retries a
        cluster-wide node health check.
        '''
        main.step( "Send the command to initialize the upgrade" )
        # Any active node's CLI can drive the cluster-wide upgrade
        ctrl = main.Cluster.next().CLI
        initialized = ctrl.issuInit()
        utilities.assert_equals( expect=main.TRUE, actual=initialized,
                                 onpass="ISSU initialized",
                                 onfail="Error initializing the upgrade" )

        main.step( "Check the status of the upgrade" )
        ctrl = main.Cluster.next().CLI
        status = ctrl.issu()
        # Status is only logged for debugging, not asserted on
        main.log.debug( status )
        # TODO: check things here?

        main.step( "Checking ONOS nodes" )
        # Retry the health check since nodes may need time to settle
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       sleep=15,
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )