blob: d7b64dec0dbcaae95a92394d567b0047e1dd1212 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
2Copyright 2015 Open Networking Foundation (ONF)
3
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
11 (at your option) any later version.
12
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
21
Jon Halla440e872016-03-31 15:15:50 -070022import json
Jon Hall41d39f12016-04-11 22:54:35 -070023import time
Jon Halle1a3b752015-07-22 13:02:46 -070024
Jon Hallf37d44d2017-05-24 10:37:30 -070025
Jon Hall41d39f12016-04-11 22:54:35 -070026class HA():
Jon Hall57b50432015-10-22 10:20:10 -070027
Jon Halla440e872016-03-31 15:15:50 -070028 def __init__( self ):
29 self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070030
Devin Lim58046fa2017-07-05 16:55:00 -070031 def customizeOnosGenPartitions( self ):
32 self.startingMininet()
33 # copy gen-partions file to ONOS
34 # NOTE: this assumes TestON and ONOS are on the same machine
35 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
36 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
37 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
38 main.ONOSbench.ip_address,
39 srcFile,
40 dstDir,
41 pwd=main.ONOSbench.pwd,
42 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070043
Devin Lim58046fa2017-07-05 16:55:00 -070044 def cleanUpGenPartition( self ):
45 # clean up gen-partitions file
46 try:
47 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
48 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
49 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
50 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
51 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
52 str( main.ONOSbench.handle.before ) )
53 except ( pexpect.TIMEOUT, pexpect.EOF ):
54 main.log.exception( "ONOSbench: pexpect exception found:" +
55 main.ONOSbench.handle.before )
56 main.cleanup()
57 main.exit()
Jon Hallca319892017-06-15 15:25:22 -070058
Devin Lim58046fa2017-07-05 16:55:00 -070059 def startingMininet( self ):
60 main.step( "Starting Mininet" )
61 # scp topo file to mininet
62 # TODO: move to params?
63 topoName = "obelisk.py"
64 filePath = main.ONOSbench.home + "/tools/test/topos/"
65 main.ONOSbench.scp( main.Mininet1,
66 filePath + topoName,
67 main.Mininet1.home,
68 direction="to" )
69 mnResult = main.Mininet1.startNet()
70 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
71 onpass="Mininet Started",
72 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070073
Devin Lim58046fa2017-07-05 16:55:00 -070074 def scalingMetadata( self ):
75 import re
76 main.scaling = main.params[ 'scaling' ].split( "," )
77 main.log.debug( main.scaling )
78 scale = main.scaling.pop( 0 )
79 main.log.debug( scale )
80 if "e" in scale:
81 equal = True
82 else:
83 equal = False
84 main.log.debug( equal )
85 main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
86 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
87 utilities.assert_equals( expect=main.TRUE, actual=genResult,
88 onpass="New cluster metadata file generated",
89 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070090
Devin Lim58046fa2017-07-05 16:55:00 -070091 def swapNodeMetadata( self ):
92 if main.numCtrls >= 5:
93 main.numCtrls -= 2
94 else:
95 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
96 genResult = main.Server.generateFile( main.numCtrls )
97 utilities.assert_equals( expect=main.TRUE, actual=genResult,
98 onpass="New cluster metadata file generated",
99 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700100
Devin Lim58046fa2017-07-05 16:55:00 -0700101 def customizeOnosService( self, metadataMethod ):
102 import os
103 main.step( "Setup server for cluster metadata file" )
104 main.serverPort = main.params[ 'server' ][ 'port' ]
105 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
106 main.log.debug( "Root dir: {}".format( rootDir ) )
107 status = main.Server.start( main.ONOSbench,
108 rootDir,
109 port=main.serverPort,
110 logDir=main.logdir + "/server.log" )
111 utilities.assert_equals( expect=main.TRUE, actual=status,
112 onpass="Server started",
113 onfail="Failled to start SimpleHTTPServer" )
114
115 main.step( "Generate initial metadata file" )
116 metadataMethod()
117
118 self.startingMininet()
119
120 main.step( "Copying backup config files" )
121 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
122 cp = main.ONOSbench.scp( main.ONOSbench,
123 main.onosServicepath,
124 main.onosServicepath + ".backup",
125 direction="to" )
126
127 utilities.assert_equals( expect=main.TRUE,
128 actual=cp,
129 onpass="Copy backup config file succeeded",
130 onfail="Copy backup config file failed" )
131 # we need to modify the onos-service file to use remote metadata file
132 # url for cluster metadata file
133 iface = main.params[ 'server' ].get( 'interface' )
134 ip = main.ONOSbench.getIpAddr( iface=iface )
135 metaFile = "cluster.json"
136 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
137 main.log.warn( javaArgs )
138 main.log.warn( repr( javaArgs ) )
139 handle = main.ONOSbench.handle
140 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, main.onosServicepath )
141 main.log.warn( sed )
142 main.log.warn( repr( sed ) )
143 handle.sendline( sed )
144 handle.expect( metaFile )
145 output = handle.before
146 handle.expect( "\$" )
147 output += handle.before
148 main.log.debug( repr( output ) )
149
150 def cleanUpOnosService( self ):
151 # Cleanup custom onos-service file
152 main.ONOSbench.scp( main.ONOSbench,
153 main.onosServicepath + ".backup",
154 main.onosServicepath,
155 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700156
Jon Halla440e872016-03-31 15:15:50 -0700157 def consistentCheck( self ):
158 """
159 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700160
Jon Hallf37d44d2017-05-24 10:37:30 -0700161 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700162 - onosCounters is the parsed json output of the counters command on
163 all nodes
164 - consistent is main.TRUE if all "TestON" counters are consitent across
165 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700166 """
Jon Halle1a3b752015-07-22 13:02:46 -0700167 try:
Jon Halla440e872016-03-31 15:15:50 -0700168 # Get onos counters results
169 onosCountersRaw = []
170 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700171 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700172 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700173 name="counters-" + str( ctrl ),
174 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700175 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700176 'randomTime': True } )
177 threads.append( t )
178 t.start()
179 for t in threads:
180 t.join()
181 onosCountersRaw.append( t.result )
182 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700183 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700184 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700185 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700186 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700187 main.log.error( "Could not parse counters response from " +
188 str( main.Cluster.active()[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700189 main.log.warn( repr( onosCountersRaw[ i ] ) )
190 onosCounters.append( [] )
191
192 testCounters = {}
193 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700194 # lookes like a dict whose keys are the name of the ONOS node and
195 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700196 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700197 # }
198 # NOTE: There is an assumtion that all nodes are active
199 # based on the above for loops
200 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700201 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700202 if 'TestON' in key:
Jon Hallca319892017-06-15 15:25:22 -0700203 node = str( main.Cluster.active()[ controller[ 0 ] ] )
Jon Halla440e872016-03-31 15:15:50 -0700204 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700205 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700206 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700207 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700208 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700209 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700210 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
211 if all( tmp ):
212 consistent = main.TRUE
213 else:
214 consistent = main.FALSE
215 main.log.error( "ONOS nodes have different values for counters:\n" +
216 testCounters )
217 return ( onosCounters, consistent )
218 except Exception:
219 main.log.exception( "" )
220 main.cleanup()
221 main.exit()
222
223 def counterCheck( self, counterName, counterValue ):
224 """
225 Checks that TestON counters are consistent across all nodes and that
226 specified counter is in ONOS with the given value
227 """
228 try:
229 correctResults = main.TRUE
230 # Get onos counters results and consistentCheck
231 onosCounters, consistent = self.consistentCheck()
232 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700233 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700234 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700235 onosValue = None
236 try:
237 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700238 except AttributeError:
Jon Hallca319892017-06-15 15:25:22 -0700239 node = str( main.Cluster.active()[ i ] )
240 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700241 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700242 correctResults = main.FALSE
243 if onosValue == counterValue:
244 main.log.info( counterName + " counter value is correct" )
245 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700246 main.log.error( counterName +
247 " counter value is incorrect," +
248 " expected value: " + str( counterValue ) +
249 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700250 correctResults = main.FALSE
251 return consistent and correctResults
252 except Exception:
253 main.log.exception( "" )
254 main.cleanup()
255 main.exit()
Jon Hall41d39f12016-04-11 22:54:35 -0700256
257 def consistentLeaderboards( self, nodes ):
258 TOPIC = 'org.onosproject.election'
259 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700260 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700261 for n in range( 5 ): # Retry in case election is still happening
262 leaderList = []
263 # Get all leaderboards
264 for cli in nodes:
265 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
266 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700267 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700268 leaderList is not None
269 main.log.debug( leaderList )
270 main.log.warn( result )
271 if result:
272 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700273 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700274 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
275 return ( result, leaderList )
276
277 def nodesCheck( self, nodes ):
278 nodesOutput = []
279 results = True
280 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700281 for node in nodes:
282 t = main.Thread( target=node.nodes,
283 name="nodes-" + str( node ),
Jon Hallf37d44d2017-05-24 10:37:30 -0700284 args=[] )
Jon Hall41d39f12016-04-11 22:54:35 -0700285 threads.append( t )
286 t.start()
287
288 for t in threads:
289 t.join()
290 nodesOutput.append( t.result )
Jon Hallca319892017-06-15 15:25:22 -0700291 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Jon Hall41d39f12016-04-11 22:54:35 -0700292 for i in nodesOutput:
293 try:
294 current = json.loads( i )
295 activeIps = []
296 currentResult = False
297 for node in current:
Jon Hallf37d44d2017-05-24 10:37:30 -0700298 if node[ 'state' ] == 'READY':
299 activeIps.append( node[ 'ip' ] )
Jon Hall41d39f12016-04-11 22:54:35 -0700300 activeIps.sort()
301 if ips == activeIps:
302 currentResult = True
303 except ( ValueError, TypeError ):
304 main.log.error( "Error parsing nodes output" )
305 main.log.warn( repr( i ) )
306 currentResult = False
307 results = results and currentResult
308 return results
Jon Hallca319892017-06-15 15:25:22 -0700309
Devin Lim58046fa2017-07-05 16:55:00 -0700310 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
311 # GRAPHS
312 # NOTE: important params here:
313 # job = name of Jenkins job
314 # Plot Name = Plot-HA, only can be used if multiple plots
315 # index = The number of the graph under plot name
316 job = testName
317 graphs = '<ac:structured-macro ac:name="html">\n'
318 graphs += '<ac:plain-text-body><![CDATA[\n'
319 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
320 '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
321 '&width=500&height=300"' +\
322 'noborder="0" width="500" height="300" scrolling="yes" ' +\
323 'seamless="seamless"></iframe>\n'
324 graphs += ']]></ac:plain-text-body>\n'
325 graphs += '</ac:structured-macro>\n'
326 main.log.wiki( graphs )
Jon Hallca319892017-06-15 15:25:22 -0700327
Devin Lim58046fa2017-07-05 16:55:00 -0700328 def initialSetUp( self, serviceClean=False ):
329 """
330 rest of initialSetup
331 """
332
Devin Lim58046fa2017-07-05 16:55:00 -0700333
334 if main.params[ 'tcpdump' ].lower() == "true":
335 main.step( "Start Packet Capture MN" )
336 main.Mininet2.startTcpdump(
337 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
338 + "-MN.pcap",
339 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
340 port=main.params[ 'MNtcpdump' ][ 'port' ] )
341
342 if serviceClean:
343 main.step( "Clean up ONOS service changes" )
Jon Hallca319892017-06-15 15:25:22 -0700344 ONOSbench = main.Cluster.contollers[0].Bench
345 ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
346 ONOSbench.handle.expect( "\$" )
347 ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
348 ONOSbench.handle.expect( "\$" )
Devin Lim58046fa2017-07-05 16:55:00 -0700349
350 main.step( "Checking ONOS nodes" )
351 nodeResults = utilities.retry( self.nodesCheck,
352 False,
Jon Hallca319892017-06-15 15:25:22 -0700353 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -0700354 attempts=5 )
355
356 utilities.assert_equals( expect=True, actual=nodeResults,
357 onpass="Nodes check successful",
358 onfail="Nodes check NOT successful" )
359
360 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -0700361 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700362 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -0700363 ctrl.name,
364 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700365 main.log.error( "Failed to start ONOS, stopping test" )
366 main.cleanup()
367 main.exit()
368
369 main.step( "Activate apps defined in the params file" )
370 # get data from the params
371 apps = main.params.get( 'apps' )
372 if apps:
373 apps = apps.split( ',' )
Jon Hallca319892017-06-15 15:25:22 -0700374 main.log.debug( "Apps: " + str( apps ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700375 activateResult = True
376 for app in apps:
Jon Hallca319892017-06-15 15:25:22 -0700377 main.Cluster.active()[0].app( app, "Activate" )
Devin Lim58046fa2017-07-05 16:55:00 -0700378 # TODO: check this worked
379 time.sleep( 10 ) # wait for apps to activate
380 for app in apps:
Jon Hallca319892017-06-15 15:25:22 -0700381 state = main.Cluster.active()[0].appStatus( app )
Devin Lim58046fa2017-07-05 16:55:00 -0700382 if state == "ACTIVE":
383 activateResult = activateResult and True
384 else:
385 main.log.error( "{} is in {} state".format( app, state ) )
386 activateResult = False
387 utilities.assert_equals( expect=True,
388 actual=activateResult,
389 onpass="Successfully activated apps",
390 onfail="Failed to activate apps" )
391 else:
392 main.log.warn( "No apps were specified to be loaded after startup" )
393
394 main.step( "Set ONOS configurations" )
Jon Hallca319892017-06-15 15:25:22 -0700395 # FIXME: This shoudl be part of the general startup sequence
Devin Lim58046fa2017-07-05 16:55:00 -0700396 config = main.params.get( 'ONOS_Configuration' )
397 if config:
398 main.log.debug( config )
399 checkResult = main.TRUE
400 for component in config:
401 for setting in config[ component ]:
402 value = config[ component ][ setting ]
Jon Hallca319892017-06-15 15:25:22 -0700403 check = main.Cluster.next().setCfg( component, setting, value )
Devin Lim58046fa2017-07-05 16:55:00 -0700404 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
405 checkResult = check and checkResult
406 utilities.assert_equals( expect=main.TRUE,
407 actual=checkResult,
408 onpass="Successfully set config",
409 onfail="Failed to set config" )
410 else:
411 main.log.warn( "No configurations were specified to be changed after startup" )
412
Jon Hallca319892017-06-15 15:25:22 -0700413 main.step( "Check app ids" )
414 appCheck = self.appCheck()
415 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700416 onpass="App Ids seem to be correct",
417 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700418
Jon Hallca319892017-06-15 15:25:22 -0700419 def commonChecks( self ):
420 # TODO: make this assertable or assert in here?
421 self.topicsCheck()
422 self.partitionsCheck()
423 self.pendingMapCheck()
424 self.appCheck()
425
426 def topicsCheck( self, extraTopics=[] ):
427 """
428 Check for work partition topics in leaders output
429 """
430 leaders = main.Cluster.next().leaders()
431 missing = False
432 try:
433 if leaders:
434 parsedLeaders = json.loads( leaders )
435 output = json.dumps( parsedLeaders,
436 sort_keys=True,
437 indent=4,
438 separators=( ',', ': ' ) )
439 main.log.debug( "Leaders: " + output )
440 # check for all intent partitions
441 topics = []
442 for i in range( 14 ):
443 topics.append( "work-partition-" + str( i ) )
444 topics += extraTopics
445 main.log.debug( topics )
446 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
447 for topic in topics:
448 if topic not in ONOStopics:
449 main.log.error( "Error: " + topic +
450 " not in leaders" )
451 missing = True
452 else:
453 main.log.error( "leaders() returned None" )
454 except ( ValueError, TypeError ):
455 main.log.exception( "Error parsing leaders" )
456 main.log.error( repr( leaders ) )
457 if missing:
458 #NOTE Can we refactor this into the Cluster class? Maybe an option to print the output of a command from each node?
459 for ctrl in main.Cluster.active():
460 response = ctrl.CLI.leaders( jsonFormat=False )
461 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
462 str( response ) )
463 return missing
464
465 def partitionsCheck( self ):
466 # TODO: return something assertable
467 partitions = main.Cluster.next().partitions()
468 try:
469 if partitions:
470 parsedPartitions = json.loads( partitions )
471 output = json.dumps( parsedPartitions,
472 sort_keys=True,
473 indent=4,
474 separators=( ',', ': ' ) )
475 main.log.debug( "Partitions: " + output )
476 # TODO check for a leader in all paritions
477 # TODO check for consistency among nodes
478 else:
479 main.log.error( "partitions() returned None" )
480 except ( ValueError, TypeError ):
481 main.log.exception( "Error parsing partitions" )
482 main.log.error( repr( partitions ) )
483
484 def pendingMapCheck( self ):
485 pendingMap = main.Cluster.next().pendingMap()
486 try:
487 if pendingMap:
488 parsedPending = json.loads( pendingMap )
489 output = json.dumps( parsedPending,
490 sort_keys=True,
491 indent=4,
492 separators=( ',', ': ' ) )
493 main.log.debug( "Pending map: " + output )
494 # TODO check something here?
495 else:
496 main.log.error( "pendingMap() returned None" )
497 except ( ValueError, TypeError ):
498 main.log.exception( "Error parsing pending map" )
499 main.log.error( repr( pendingMap ) )
500
501 def appCheck( self ):
502 """
503 Check App IDs on all nodes
504 """
505 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
506 appResults = main.Cluster.command( "appToIDCheck" )
507 appCheck = all( i == main.TRUE for i in appResults )
508 if not appCheck:
509 ctrl = main.Cluster.active()[0]
510 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
511 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
512 return appCheck
513
Jon Halle0f0b342017-04-18 11:43:47 -0700514 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
515 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700516 completedValues = main.Cluster.command( "workQueueTotalCompleted",
517 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700518 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700519 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700520 completedResult = all( completedResults )
521 if not completedResult:
522 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
523 workQueueName, completed, completedValues ) )
524
525 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700526 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
527 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700528 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700529 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700530 inProgressResult = all( inProgressResults )
531 if not inProgressResult:
532 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
533 workQueueName, inProgress, inProgressValues ) )
534
535 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700536 pendingValues = main.Cluster.command( "workQueueTotalPending",
537 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700538 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700539 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700540 pendingResult = all( pendingResults )
541 if not pendingResult:
542 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
543 workQueueName, pending, pendingValues ) )
544 return completedResult and inProgressResult and pendingResult
545
Devin Lim58046fa2017-07-05 16:55:00 -0700546 def assignDevices( self, main ):
547 """
548 Assign devices to controllers
549 """
550 import re
551 assert main.numCtrls, "main.numCtrls not defined"
552 assert main, "main not defined"
553 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700554
555 main.case( "Assigning devices to controllers" )
556 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
557 "and check that an ONOS node becomes the " + \
558 "master of the device."
559 main.step( "Assign switches to controllers" )
560
Jon Hallca319892017-06-15 15:25:22 -0700561 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -0700562 swList = []
563 for i in range( 1, 29 ):
564 swList.append( "s" + str( i ) )
565 main.Mininet1.assignSwController( sw=swList, ip=ipList )
566
567 mastershipCheck = main.TRUE
568 for i in range( 1, 29 ):
569 response = main.Mininet1.getSwController( "s" + str( i ) )
570 try:
571 main.log.info( str( response ) )
572 except Exception:
573 main.log.info( repr( response ) )
Jon Hallca319892017-06-15 15:25:22 -0700574 for ctrl in main.Cluster.controllers:
575 if re.search( "tcp:" + ctrl.ipAddress, response ):
Devin Lim58046fa2017-07-05 16:55:00 -0700576 mastershipCheck = mastershipCheck and main.TRUE
577 else:
Jon Hallca319892017-06-15 15:25:22 -0700578 main.log.error( "Error, node " + repr( ctrl )+ " is " +
Devin Lim58046fa2017-07-05 16:55:00 -0700579 "not in the list of controllers s" +
580 str( i ) + " is connecting to." )
581 mastershipCheck = main.FALSE
582 utilities.assert_equals(
583 expect=main.TRUE,
584 actual=mastershipCheck,
585 onpass="Switch mastership assigned correctly",
586 onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700587
Devin Lim58046fa2017-07-05 16:55:00 -0700588 def assignIntents( self, main ):
589 """
590 Assign intents
591 """
592 import time
593 import json
594 assert main.numCtrls, "main.numCtrls not defined"
595 assert main, "main not defined"
596 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700597 try:
598 main.HAlabels
599 except ( NameError, AttributeError ):
600 main.log.error( "main.HAlabels not defined, setting to []" )
601 main.HAlabels = []
602 try:
603 main.HAdata
604 except ( NameError, AttributeError ):
605 main.log.error( "data not defined, setting to []" )
606 main.HAdata = []
607 main.case( "Adding host Intents" )
608 main.caseExplanation = "Discover hosts by using pingall then " +\
609 "assign predetermined host-to-host intents." +\
610 " After installation, check that the intent" +\
611 " is distributed to all nodes and the state" +\
612 " is INSTALLED"
613
614 # install onos-app-fwd
615 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700616 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700617 installResults = onosCli.activateApp( "org.onosproject.fwd" )
618 utilities.assert_equals( expect=main.TRUE, actual=installResults,
619 onpass="Install fwd successful",
620 onfail="Install fwd failed" )
621
622 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700623 appCheck = self.appCheck()
624 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700625 onpass="App Ids seem to be correct",
626 onfail="Something is wrong with app Ids" )
627
628 main.step( "Discovering Hosts( Via pingall for now )" )
629 # FIXME: Once we have a host discovery mechanism, use that instead
630 # REACTIVE FWD test
631 pingResult = main.FALSE
632 passMsg = "Reactive Pingall test passed"
633 time1 = time.time()
634 pingResult = main.Mininet1.pingall()
635 time2 = time.time()
636 if not pingResult:
637 main.log.warn( "First pingall failed. Trying again..." )
638 pingResult = main.Mininet1.pingall()
639 passMsg += " on the second try"
640 utilities.assert_equals(
641 expect=main.TRUE,
642 actual=pingResult,
643 onpass=passMsg,
644 onfail="Reactive Pingall failed, " +
645 "one or more ping pairs failed" )
646 main.log.info( "Time for pingall: %2f seconds" %
647 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700648 if not pingResult:
649 main.cleanup()
650 main.exit()
Devin Lim58046fa2017-07-05 16:55:00 -0700651 # timeout for fwd flows
652 time.sleep( 11 )
653 # uninstall onos-app-fwd
654 main.step( "Uninstall reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700655 uninstallResult = onosCli.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700656 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
657 onpass="Uninstall fwd successful",
658 onfail="Uninstall fwd failed" )
659
660 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700661 appCheck2 = self.appCheck()
662 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700663 onpass="App Ids seem to be correct",
664 onfail="Something is wrong with app Ids" )
665
666 main.step( "Add host intents via cli" )
667 intentIds = []
668 # TODO: move the host numbers to params
669 # Maybe look at all the paths we ping?
670 intentAddResult = True
671 hostResult = main.TRUE
672 for i in range( 8, 18 ):
673 main.log.info( "Adding host intent between h" + str( i ) +
674 " and h" + str( i + 10 ) )
675 host1 = "00:00:00:00:00:" + \
676 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
677 host2 = "00:00:00:00:00:" + \
678 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
679 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700680 host1Dict = onosCli.CLI.getHost( host1 )
681 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700682 host1Id = None
683 host2Id = None
684 if host1Dict and host2Dict:
685 host1Id = host1Dict.get( 'id', None )
686 host2Id = host2Dict.get( 'id', None )
687 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700688 nodeNum = len( main.Cluster.active() )
689 ctrl = main.Cluster.active()[ i % nodeNum ]
690 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700691 if tmpId:
692 main.log.info( "Added intent with id: " + tmpId )
693 intentIds.append( tmpId )
694 else:
695 main.log.error( "addHostIntent returned: " +
696 repr( tmpId ) )
697 else:
698 main.log.error( "Error, getHost() failed for h" + str( i ) +
699 " and/or h" + str( i + 10 ) )
Jon Hallca319892017-06-15 15:25:22 -0700700 hosts = main.Cluster.next().hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700701 try:
Jon Hallca319892017-06-15 15:25:22 -0700702 output = json.dumps( json.loads( hosts ),
703 sort_keys=True,
704 indent=4,
705 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700706 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700707 output = repr( hosts )
708 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700709 hostResult = main.FALSE
710 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
711 onpass="Found a host id for each host",
712 onfail="Error looking up host ids" )
713
714 intentStart = time.time()
715 onosIds = onosCli.getAllIntentsId()
716 main.log.info( "Submitted intents: " + str( intentIds ) )
717 main.log.info( "Intents in ONOS: " + str( onosIds ) )
718 for intent in intentIds:
719 if intent in onosIds:
720 pass # intent submitted is in onos
721 else:
722 intentAddResult = False
723 if intentAddResult:
724 intentStop = time.time()
725 else:
726 intentStop = None
727 # Print the intent states
728 intents = onosCli.intents()
729 intentStates = []
730 installedCheck = True
731 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
732 count = 0
733 try:
734 for intent in json.loads( intents ):
735 state = intent.get( 'state', None )
736 if "INSTALLED" not in state:
737 installedCheck = False
738 intentId = intent.get( 'id', None )
739 intentStates.append( ( intentId, state ) )
740 except ( ValueError, TypeError ):
741 main.log.exception( "Error parsing intents" )
742 # add submitted intents not in the store
743 tmplist = [ i for i, s in intentStates ]
744 missingIntents = False
745 for i in intentIds:
746 if i not in tmplist:
747 intentStates.append( ( i, " - " ) )
748 missingIntents = True
749 intentStates.sort()
750 for i, s in intentStates:
751 count += 1
752 main.log.info( "%-6s%-15s%-15s" %
753 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700754 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700755
756 intentAddResult = bool( intentAddResult and not missingIntents and
757 installedCheck )
758 if not intentAddResult:
759 main.log.error( "Error in pushing host intents to ONOS" )
760
761 main.step( "Intent Anti-Entropy dispersion" )
762 for j in range( 100 ):
763 correct = True
764 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700765 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700766 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700767 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700768 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700769 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700770 str( sorted( onosIds ) ) )
771 if sorted( ids ) != sorted( intentIds ):
772 main.log.warn( "Set of intent IDs doesn't match" )
773 correct = False
774 break
775 else:
Jon Hallca319892017-06-15 15:25:22 -0700776 intents = json.loads( ctrl.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700777 for intent in intents:
778 if intent[ 'state' ] != "INSTALLED":
779 main.log.warn( "Intent " + intent[ 'id' ] +
780 " is " + intent[ 'state' ] )
781 correct = False
782 break
783 if correct:
784 break
785 else:
786 time.sleep( 1 )
787 if not intentStop:
788 intentStop = time.time()
789 global gossipTime
790 gossipTime = intentStop - intentStart
791 main.log.info( "It took about " + str( gossipTime ) +
792 " seconds for all intents to appear in each node" )
793 append = False
794 title = "Gossip Intents"
795 count = 1
796 while append is False:
797 curTitle = title + str( count )
798 if curTitle not in main.HAlabels:
799 main.HAlabels.append( curTitle )
800 main.HAdata.append( str( gossipTime ) )
801 append = True
802 else:
803 count += 1
804 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Jon Hallca319892017-06-15 15:25:22 -0700805 maxGossipTime = gossipPeriod * len( main.Cluster.controllers )
Devin Lim58046fa2017-07-05 16:55:00 -0700806 utilities.assert_greater_equals(
807 expect=maxGossipTime, actual=gossipTime,
808 onpass="ECM anti-entropy for intents worked within " +
809 "expected time",
810 onfail="Intent ECM anti-entropy took too long. " +
811 "Expected time:{}, Actual time:{}".format( maxGossipTime,
812 gossipTime ) )
813 if gossipTime <= maxGossipTime:
814 intentAddResult = True
815
Jon Hallca319892017-06-15 15:25:22 -0700816 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700817 if not intentAddResult or "key" in pendingMap:
818 import time
819 installedCheck = True
820 main.log.info( "Sleeping 60 seconds to see if intents are found" )
821 time.sleep( 60 )
822 onosIds = onosCli.getAllIntentsId()
823 main.log.info( "Submitted intents: " + str( intentIds ) )
824 main.log.info( "Intents in ONOS: " + str( onosIds ) )
825 # Print the intent states
826 intents = onosCli.intents()
827 intentStates = []
828 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
829 count = 0
830 try:
831 for intent in json.loads( intents ):
832 # Iter through intents of a node
833 state = intent.get( 'state', None )
834 if "INSTALLED" not in state:
835 installedCheck = False
836 intentId = intent.get( 'id', None )
837 intentStates.append( ( intentId, state ) )
838 except ( ValueError, TypeError ):
839 main.log.exception( "Error parsing intents" )
840 # add submitted intents not in the store
841 tmplist = [ i for i, s in intentStates ]
842 for i in intentIds:
843 if i not in tmplist:
844 intentStates.append( ( i, " - " ) )
845 intentStates.sort()
846 for i, s in intentStates:
847 count += 1
848 main.log.info( "%-6s%-15s%-15s" %
849 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700850 self.topicsCheck( [ "org.onosproject.election" ] )
851 self.partitionsCheck()
852 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700853
    def pingAcrossHostIntent( self, main ):
        """
        Verify connectivity by pinging across previously added host intents.

        Polls one ONOS node (up to ~40 seconds) until every intent reports
        an INSTALLED state, then pings each host pair h<i> -> h<i+10> for
        i in 8..17 through Mininet. Afterwards checks topic leadership,
        partitions and the pending map. If the intents never all reached
        INSTALLED, waits 60 seconds, dumps intent/flow state again and
        retries the pings once.

        main -- TestON main object carrying the cluster handle
                ( main.Cluster ), the Mininet driver ( main.Mininet1 ) and
        the result constants main.TRUE / main.FALSE.
        All results are reported via utilities.assert_equals; no return value.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # Round-robin selection of one active ONOS node to query
        onosCli = main.Cluster.next()
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll up to 40 times, one second apart, until all intents are INSTALLED
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # intents was not valid JSON (e.g. an error string from the CLI)
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        # NOTE: host numbering 8..17 <-> 18..27 matches the pairs used when
        #       the host intents were added earlier in the test
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            # Dump the intents for debugging; fall back to repr() when the
            # response is not parseable JSON
            try:
                tmpIntents = onosCli.intents()
                output = json.dumps( json.loads( tmpIntents ),
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
            except ( ValueError, TypeError ):
                output = repr( tmpIntents )
            main.log.debug( "ONOS1 intents: " + output )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        # NOTE(review): topicsCheck() is expected to return False when no
        #               topics were lost -- hence expect=False below
        topicsCheck = self.topicsCheck()
        utilities.assert_equals( expect=False, actual=topicsCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        self.partitionsCheck()
        self.pendingMapCheck()

        # If intents never settled, give anti-entropy another minute and retry
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            self.commonChecks()

            # Print flowrules
            main.log.debug( onosCli.flows() )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( str( onosCli.name ) + " intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1001
1002 def readingState( self, main ):
1003 """
1004 Reading state of ONOS
1005 """
1006 import json
1007 import time
1008 assert main.numCtrls, "main.numCtrls not defined"
1009 assert main, "main not defined"
1010 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07001011 try:
1012 from tests.dependencies.topology import Topology
1013 except ImportError:
1014 main.log.error( "Topology not found exiting the test" )
1015 main.exit()
1016 try:
1017 main.topoRelated
1018 except ( NameError, AttributeError ):
1019 main.topoRelated = Topology()
1020 main.case( "Setting up and gathering data for current state" )
1021 # The general idea for this test case is to pull the state of
1022 # ( intents,flows, topology,... ) from each ONOS node
1023 # We can then compare them with each other and also with past states
1024
1025 main.step( "Check that each switch has a master" )
1026 global mastershipState
1027 mastershipState = '[]'
1028
1029 # Assert that each device has a master
Jon Hallca319892017-06-15 15:25:22 -07001030 rolesNotNull = all( [ i == main.TRUE for i in main.Cluster.command( "rolesNotNull" ) ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001031 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07001032 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07001033 actual=rolesNotNull,
1034 onpass="Each device has a master",
1035 onfail="Some devices don't have a master assigned" )
1036
1037 main.step( "Get the Mastership of each switch from each controller" )
Jon Hallca319892017-06-15 15:25:22 -07001038 ONOSMastership = main.Cluster.command( "roles" )
1039 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07001040 consistentMastership = True
1041 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001042 for i in range( len( ONOSMastership ) ):
Jon Hallca319892017-06-15 15:25:22 -07001043 node = str( main.Cluster.active()[ i ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001044 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001045 main.log.error( "Error in getting " + node + " roles" )
1046 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001047 repr( ONOSMastership[ i ] ) )
1048 rolesResults = False
1049 utilities.assert_equals(
1050 expect=True,
1051 actual=rolesResults,
1052 onpass="No error in reading roles output",
1053 onfail="Error in reading roles from ONOS" )
1054
1055 main.step( "Check for consistency in roles from each controller" )
1056 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1057 main.log.info(
1058 "Switch roles are consistent across all ONOS nodes" )
1059 else:
1060 consistentMastership = False
1061 utilities.assert_equals(
1062 expect=True,
1063 actual=consistentMastership,
1064 onpass="Switch roles are consistent across all ONOS nodes",
1065 onfail="ONOS nodes have different views of switch roles" )
1066
1067 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001068 for i in range( len( main.Cluster.active() ) ):
1069 node = str( main.Cluster.active()[ i ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001070 try:
1071 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001072 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001073 json.dumps(
1074 json.loads( ONOSMastership[ i ] ),
1075 sort_keys=True,
1076 indent=4,
1077 separators=( ',', ': ' ) ) )
1078 except ( ValueError, TypeError ):
1079 main.log.warn( repr( ONOSMastership[ i ] ) )
1080 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001081 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001082 mastershipState = ONOSMastership[ 0 ]
1083
1084 main.step( "Get the intents from each controller" )
1085 global intentState
1086 intentState = []
Jon Hallca319892017-06-15 15:25:22 -07001087 ONOSIntents = main.Cluster.command( "intents" )
1088 intentCheck = main.FALSE
1089 consistentIntents = True
1090 intentsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001091 for i in range( len( ONOSIntents ) ):
Jon Hallca319892017-06-15 15:25:22 -07001092 node = str( main.Cluster.active()[ i ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001093 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001094 main.log.error( "Error in getting " + node + " intents" )
1095 main.log.warn( node + " intents response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001096 repr( ONOSIntents[ i ] ) )
1097 intentsResults = False
1098 utilities.assert_equals(
1099 expect=True,
1100 actual=intentsResults,
1101 onpass="No error in reading intents output",
1102 onfail="Error in reading intents from ONOS" )
1103
1104 main.step( "Check for consistency in Intents from each controller" )
1105 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1106 main.log.info( "Intents are consistent across all ONOS " +
1107 "nodes" )
1108 else:
1109 consistentIntents = False
1110 main.log.error( "Intents not consistent" )
1111 utilities.assert_equals(
1112 expect=True,
1113 actual=consistentIntents,
1114 onpass="Intents are consistent across all ONOS nodes",
1115 onfail="ONOS nodes have different views of intents" )
1116
1117 if intentsResults:
1118 # Try to make it easy to figure out what is happening
1119 #
1120 # Intent ONOS1 ONOS2 ...
1121 # 0x01 INSTALLED INSTALLING
1122 # ... ... ...
1123 # ... ... ...
1124 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001125 for ctrl in main.Cluster.active():
1126 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001127 main.log.warn( title )
1128 # get all intent keys in the cluster
1129 keys = []
1130 try:
1131 # Get the set of all intent keys
1132 for nodeStr in ONOSIntents:
1133 node = json.loads( nodeStr )
1134 for intent in node:
1135 keys.append( intent.get( 'id' ) )
1136 keys = set( keys )
1137 # For each intent key, print the state on each node
1138 for key in keys:
1139 row = "%-13s" % key
1140 for nodeStr in ONOSIntents:
1141 node = json.loads( nodeStr )
1142 for intent in node:
1143 if intent.get( 'id', "Error" ) == key:
1144 row += "%-15s" % intent.get( 'state' )
1145 main.log.warn( row )
1146 # End of intent state table
1147 except ValueError as e:
1148 main.log.exception( e )
1149 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1150
1151 if intentsResults and not consistentIntents:
1152 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001153 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001154 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1155 sort_keys=True,
1156 indent=4,
1157 separators=( ',', ': ' ) ) )
1158 for i in range( len( ONOSIntents ) ):
Jon Hallca319892017-06-15 15:25:22 -07001159 node = str( main.Cluster.active()[ i ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001160 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001161 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001162 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1163 sort_keys=True,
1164 indent=4,
1165 separators=( ',', ': ' ) ) )
1166 else:
Jon Hallca319892017-06-15 15:25:22 -07001167 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001168 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001169 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001170 intentState = ONOSIntents[ 0 ]
1171
1172 main.step( "Get the flows from each controller" )
1173 global flowState
1174 flowState = []
Jon Hallca319892017-06-15 15:25:22 -07001175 ONOSFlows = main.Cluster.command( "flows" ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001176 ONOSFlowsJson = []
1177 flowCheck = main.FALSE
1178 consistentFlows = True
1179 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001180 for i in range( len( ONOSFlows ) ):
Jon Hallca319892017-06-15 15:25:22 -07001181 node = str( main.Cluster.active()[ i ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001182 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001183 main.log.error( "Error in getting " + node + " flows" )
1184 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001185 repr( ONOSFlows[ i ] ) )
1186 flowsResults = False
1187 ONOSFlowsJson.append( None )
1188 else:
1189 try:
1190 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1191 except ( ValueError, TypeError ):
1192 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001193 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001194 " response as json." )
1195 main.log.error( repr( ONOSFlows[ i ] ) )
1196 ONOSFlowsJson.append( None )
1197 flowsResults = False
1198 utilities.assert_equals(
1199 expect=True,
1200 actual=flowsResults,
1201 onpass="No error in reading flows output",
1202 onfail="Error in reading flows from ONOS" )
1203
1204 main.step( "Check for consistency in Flows from each controller" )
1205 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1206 if all( tmp ):
1207 main.log.info( "Flow count is consistent across all ONOS nodes" )
1208 else:
1209 consistentFlows = False
1210 utilities.assert_equals(
1211 expect=True,
1212 actual=consistentFlows,
1213 onpass="The flow count is consistent across all ONOS nodes",
1214 onfail="ONOS nodes have different flow counts" )
1215
1216 if flowsResults and not consistentFlows:
1217 for i in range( len( ONOSFlows ) ):
Jon Hallca319892017-06-15 15:25:22 -07001218 node = str( main.Cluster.active()[ i ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001219 try:
1220 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001221 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001222 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1223 indent=4, separators=( ',', ': ' ) ) )
1224 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001225 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001226 repr( ONOSFlows[ i ] ) )
1227 elif flowsResults and consistentFlows:
1228 flowCheck = main.TRUE
1229 flowState = ONOSFlows[ 0 ]
1230
1231 main.step( "Get the OF Table entries" )
1232 global flows
1233 flows = []
1234 for i in range( 1, 29 ):
1235 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1236 if flowCheck == main.FALSE:
1237 for table in flows:
1238 main.log.warn( table )
1239 # TODO: Compare switch flow tables with ONOS flow tables
1240
1241 main.step( "Start continuous pings" )
1242 main.Mininet2.pingLong(
1243 src=main.params[ 'PING' ][ 'source1' ],
1244 target=main.params[ 'PING' ][ 'target1' ],
1245 pingTime=500 )
1246 main.Mininet2.pingLong(
1247 src=main.params[ 'PING' ][ 'source2' ],
1248 target=main.params[ 'PING' ][ 'target2' ],
1249 pingTime=500 )
1250 main.Mininet2.pingLong(
1251 src=main.params[ 'PING' ][ 'source3' ],
1252 target=main.params[ 'PING' ][ 'target3' ],
1253 pingTime=500 )
1254 main.Mininet2.pingLong(
1255 src=main.params[ 'PING' ][ 'source4' ],
1256 target=main.params[ 'PING' ][ 'target4' ],
1257 pingTime=500 )
1258 main.Mininet2.pingLong(
1259 src=main.params[ 'PING' ][ 'source5' ],
1260 target=main.params[ 'PING' ][ 'target5' ],
1261 pingTime=500 )
1262 main.Mininet2.pingLong(
1263 src=main.params[ 'PING' ][ 'source6' ],
1264 target=main.params[ 'PING' ][ 'target6' ],
1265 pingTime=500 )
1266 main.Mininet2.pingLong(
1267 src=main.params[ 'PING' ][ 'source7' ],
1268 target=main.params[ 'PING' ][ 'target7' ],
1269 pingTime=500 )
1270 main.Mininet2.pingLong(
1271 src=main.params[ 'PING' ][ 'source8' ],
1272 target=main.params[ 'PING' ][ 'target8' ],
1273 pingTime=500 )
1274 main.Mininet2.pingLong(
1275 src=main.params[ 'PING' ][ 'source9' ],
1276 target=main.params[ 'PING' ][ 'target9' ],
1277 pingTime=500 )
1278 main.Mininet2.pingLong(
1279 src=main.params[ 'PING' ][ 'source10' ],
1280 target=main.params[ 'PING' ][ 'target10' ],
1281 pingTime=500 )
1282
1283 main.step( "Collecting topology information from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -07001284 devices = main.topoRelated.getAllDevices( main.Cluster.active(), False )
1285 hosts = main.topoRelated.getAllHosts( main.Cluster.active(), False, inJson=True )
1286 ports = main.topoRelated.getAllPorts( main.Cluster.active(), False )
1287 links = main.topoRelated.getAllLinks( main.Cluster.active(), False )
1288 clusters = main.topoRelated.getAllClusters( main.Cluster.active(), False )
Devin Lim58046fa2017-07-05 16:55:00 -07001289 # Compare json objects for hosts and dataplane clusters
1290
1291 # hosts
1292 main.step( "Host view is consistent across ONOS nodes" )
1293 consistentHostsResult = main.TRUE
1294 for controller in range( len( hosts ) ):
Jon Hallca319892017-06-15 15:25:22 -07001295 controllerStr = str( main.Cluster.active()[ controller ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001296 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1297 if hosts[ controller ] == hosts[ 0 ]:
1298 continue
1299 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001300 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001301 controllerStr +
1302 " is inconsistent with ONOS1" )
1303 main.log.warn( repr( hosts[ controller ] ) )
1304 consistentHostsResult = main.FALSE
1305
1306 else:
Jon Hallca319892017-06-15 15:25:22 -07001307 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001308 controllerStr )
1309 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001310 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001311 " hosts response: " +
1312 repr( hosts[ controller ] ) )
1313 utilities.assert_equals(
1314 expect=main.TRUE,
1315 actual=consistentHostsResult,
1316 onpass="Hosts view is consistent across all ONOS nodes",
1317 onfail="ONOS nodes have different views of hosts" )
1318
1319 main.step( "Each host has an IP address" )
1320 ipResult = main.TRUE
1321 for controller in range( 0, len( hosts ) ):
Jon Hallca319892017-06-15 15:25:22 -07001322 controllerStr = str( main.Cluster.active()[ controller ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001323 if hosts[ controller ]:
1324 for host in hosts[ controller ]:
1325 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001326 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001327 controllerStr + ": " + str( host ) )
1328 ipResult = main.FALSE
1329 utilities.assert_equals(
1330 expect=main.TRUE,
1331 actual=ipResult,
1332 onpass="The ips of the hosts aren't empty",
1333 onfail="The ip of at least one host is missing" )
1334
1335 # Strongly connected clusters of devices
1336 main.step( "Cluster view is consistent across ONOS nodes" )
1337 consistentClustersResult = main.TRUE
1338 for controller in range( len( clusters ) ):
Jon Hallca319892017-06-15 15:25:22 -07001339 controllerStr = str( main.Cluster.active()[ controller ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001340 if "Error" not in clusters[ controller ]:
1341 if clusters[ controller ] == clusters[ 0 ]:
1342 continue
1343 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001344 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001345 " is inconsistent with ONOS1" )
1346 consistentClustersResult = main.FALSE
1347
1348 else:
1349 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001350 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001351 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001352 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001353 " clusters response: " +
1354 repr( clusters[ controller ] ) )
1355 utilities.assert_equals(
1356 expect=main.TRUE,
1357 actual=consistentClustersResult,
1358 onpass="Clusters view is consistent across all ONOS nodes",
1359 onfail="ONOS nodes have different views of clusters" )
1360 if not consistentClustersResult:
1361 main.log.debug( clusters )
1362
1363 # there should always only be one cluster
1364 main.step( "Cluster view correct across ONOS nodes" )
1365 try:
1366 numClusters = len( json.loads( clusters[ 0 ] ) )
1367 except ( ValueError, TypeError ):
1368 main.log.exception( "Error parsing clusters[0]: " +
1369 repr( clusters[ 0 ] ) )
1370 numClusters = "ERROR"
1371 utilities.assert_equals(
1372 expect=1,
1373 actual=numClusters,
1374 onpass="ONOS shows 1 SCC",
1375 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1376
1377 main.step( "Comparing ONOS topology to MN" )
1378 devicesResults = main.TRUE
1379 linksResults = main.TRUE
1380 hostsResults = main.TRUE
1381 mnSwitches = main.Mininet1.getSwitches()
1382 mnLinks = main.Mininet1.getLinks()
1383 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001384 for controller in range( len( main.Cluster.active() ) ):
1385 controllerStr = str( main.Cluster.active()[ controller ] )
Devin Lim58046fa2017-07-05 16:55:00 -07001386 currentDevicesResult = main.topoRelated.compareDevicePort(
1387 main.Mininet1, controller,
1388 mnSwitches, devices, ports )
1389 utilities.assert_equals( expect=main.TRUE,
1390 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001391 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001392 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001393 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001394 " Switches view is incorrect" )
1395
1396 currentLinksResult = main.topoRelated.compareBase( links, controller,
1397 main.Mininet1.compareLinks,
1398 [ mnSwitches, mnLinks ] )
1399 utilities.assert_equals( expect=main.TRUE,
1400 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001401 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001402 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001403 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001404 " links view is incorrect" )
1405
1406 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1407 currentHostsResult = main.Mininet1.compareHosts(
1408 mnHosts,
1409 hosts[ controller ] )
1410 else:
1411 currentHostsResult = main.FALSE
1412 utilities.assert_equals( expect=main.TRUE,
1413 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001414 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001415 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001416 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001417 " hosts don't match Mininet" )
1418
1419 devicesResults = devicesResults and currentDevicesResult
1420 linksResults = linksResults and currentLinksResult
1421 hostsResults = hostsResults and currentHostsResult
1422
1423 main.step( "Device information is correct" )
1424 utilities.assert_equals(
1425 expect=main.TRUE,
1426 actual=devicesResults,
1427 onpass="Device information is correct",
1428 onfail="Device information is incorrect" )
1429
1430 main.step( "Links are correct" )
1431 utilities.assert_equals(
1432 expect=main.TRUE,
1433 actual=linksResults,
1434 onpass="Link are correct",
1435 onfail="Links are incorrect" )
1436
1437 main.step( "Hosts are correct" )
1438 utilities.assert_equals(
1439 expect=main.TRUE,
1440 actual=hostsResults,
1441 onpass="Hosts are correct",
1442 onfail="Hosts are incorrect" )
1443
1444 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001445 """
1446 Check for basic functionality with distributed primitives
1447 """
Jon Halle0f0b342017-04-18 11:43:47 -07001448 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001449 try:
1450 # Make sure variables are defined/set
1451 assert main.numCtrls, "main.numCtrls not defined"
1452 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001453 assert main.pCounterName, "main.pCounterName not defined"
1454 assert main.onosSetName, "main.onosSetName not defined"
1455 # NOTE: assert fails if value is 0/None/Empty/False
1456 try:
1457 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001458 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001459 main.log.error( "main.pCounterValue not defined, setting to 0" )
1460 main.pCounterValue = 0
1461 try:
1462 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001463 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001464 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001465 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001466 # Variables for the distributed primitives tests. These are local only
1467 addValue = "a"
1468 addAllValue = "a b c d e f"
1469 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001470 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001471 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001472 workQueueName = "TestON-Queue"
1473 workQueueCompleted = 0
1474 workQueueInProgress = 0
1475 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001476
1477 description = "Check for basic functionality with distributed " +\
1478 "primitives"
1479 main.case( description )
1480 main.caseExplanation = "Test the methods of the distributed " +\
1481 "primitives (counters and sets) throught the cli"
1482 # DISTRIBUTED ATOMIC COUNTERS
1483 # Partitioned counters
1484 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001485 pCounters = main.Cluster.command( "counterTestAddAndGet",
1486 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001487 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001488 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001489 main.pCounterValue += 1
1490 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001491 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001492 pCounterResults = True
1493 for i in addedPValues:
1494 tmpResult = i in pCounters
1495 pCounterResults = pCounterResults and tmpResult
1496 if not tmpResult:
1497 main.log.error( str( i ) + " is not in partitioned "
1498 "counter incremented results" )
1499 utilities.assert_equals( expect=True,
1500 actual=pCounterResults,
1501 onpass="Default counter incremented",
1502 onfail="Error incrementing default" +
1503 " counter" )
1504
1505 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001506 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1507 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001508 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001509 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001510 addedPValues.append( main.pCounterValue )
1511 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001512 # Check that counter incremented numController times
1513 pCounterResults = True
1514 for i in addedPValues:
1515 tmpResult = i in pCounters
1516 pCounterResults = pCounterResults and tmpResult
1517 if not tmpResult:
1518 main.log.error( str( i ) + " is not in partitioned "
1519 "counter incremented results" )
1520 utilities.assert_equals( expect=True,
1521 actual=pCounterResults,
1522 onpass="Default counter incremented",
1523 onfail="Error incrementing default" +
1524 " counter" )
1525
1526 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001527 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001528 utilities.assert_equals( expect=main.TRUE,
1529 actual=incrementCheck,
1530 onpass="Added counters are correct",
1531 onfail="Added counters are incorrect" )
1532
1533 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001534 pCounters = main.Cluster.command( "counterTestAddAndGet",
1535 args=[ main.pCounterName ],
1536 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001537 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001538 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001539 main.pCounterValue += -8
1540 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001541 # Check that counter incremented numController times
1542 pCounterResults = True
1543 for i in addedPValues:
1544 tmpResult = i in pCounters
1545 pCounterResults = pCounterResults and tmpResult
1546 if not tmpResult:
1547 main.log.error( str( i ) + " is not in partitioned "
1548 "counter incremented results" )
1549 utilities.assert_equals( expect=True,
1550 actual=pCounterResults,
1551 onpass="Default counter incremented",
1552 onfail="Error incrementing default" +
1553 " counter" )
1554
1555 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001556 pCounters = main.Cluster.command( "counterTestAddAndGet",
1557 args=[ main.pCounterName ],
1558 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001559 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001560 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001561 main.pCounterValue += 5
1562 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001563
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001564 # Check that counter incremented numController times
1565 pCounterResults = True
1566 for i in addedPValues:
1567 tmpResult = i in pCounters
1568 pCounterResults = pCounterResults and tmpResult
1569 if not tmpResult:
1570 main.log.error( str( i ) + " is not in partitioned "
1571 "counter incremented results" )
1572 utilities.assert_equals( expect=True,
1573 actual=pCounterResults,
1574 onpass="Default counter incremented",
1575 onfail="Error incrementing default" +
1576 " counter" )
1577
1578 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001579 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1580 args=[ main.pCounterName ],
1581 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001582 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001583 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001584 addedPValues.append( main.pCounterValue )
1585 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001586 # Check that counter incremented numController times
1587 pCounterResults = True
1588 for i in addedPValues:
1589 tmpResult = i in pCounters
1590 pCounterResults = pCounterResults and tmpResult
1591 if not tmpResult:
1592 main.log.error( str( i ) + " is not in partitioned "
1593 "counter incremented results" )
1594 utilities.assert_equals( expect=True,
1595 actual=pCounterResults,
1596 onpass="Default counter incremented",
1597 onfail="Error incrementing default" +
1598 " counter" )
1599
1600 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001601 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001602 utilities.assert_equals( expect=main.TRUE,
1603 actual=incrementCheck,
1604 onpass="Added counters are correct",
1605 onfail="Added counters are incorrect" )
1606
1607 # DISTRIBUTED SETS
1608 main.step( "Distributed Set get" )
1609 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001610 getResponses = main.Cluster.command( "setTestGet",
1611 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001612 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001613 for i in range( len( main.Cluster.active() ) ):
1614 node = main.Cluster.active()[ i ]
Jon Hallf37d44d2017-05-24 10:37:30 -07001615 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001616 current = set( getResponses[ i ] )
1617 if len( current ) == len( getResponses[ i ] ):
1618 # no repeats
1619 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001620 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001621 " has incorrect view" +
1622 " of set " + main.onosSetName + ":\n" +
1623 str( getResponses[ i ] ) )
1624 main.log.debug( "Expected: " + str( main.onosSet ) )
1625 main.log.debug( "Actual: " + str( current ) )
1626 getResults = main.FALSE
1627 else:
1628 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001629 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001630 " has repeat elements in" +
1631 " set " + main.onosSetName + ":\n" +
1632 str( getResponses[ i ] ) )
1633 getResults = main.FALSE
1634 elif getResponses[ i ] == main.ERROR:
1635 getResults = main.FALSE
1636 utilities.assert_equals( expect=main.TRUE,
1637 actual=getResults,
1638 onpass="Set elements are correct",
1639 onfail="Set elements are incorrect" )
1640
1641 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001642 sizeResponses = main.Cluster.command( "setTestSize",
1643 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001644 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001645 for i in range( len( main.Cluster.active() ) ):
1646 node = main.Cluster.active()[ i ]
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001647 if size != sizeResponses[ i ]:
1648 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001649 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001650 " expected a size of " + str( size ) +
1651 " for set " + main.onosSetName +
1652 " but got " + str( sizeResponses[ i ] ) )
1653 utilities.assert_equals( expect=main.TRUE,
1654 actual=sizeResults,
1655 onpass="Set sizes are correct",
1656 onfail="Set sizes are incorrect" )
1657
1658 main.step( "Distributed Set add()" )
1659 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001660 addResponses = main.Cluster.command( "setTestAdd",
1661 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001662 # main.TRUE = successfully changed the set
1663 # main.FALSE = action resulted in no change in set
1664 # main.ERROR - Some error in executing the function
1665 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001666 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001667 if addResponses[ i ] == main.TRUE:
1668 # All is well
1669 pass
1670 elif addResponses[ i ] == main.FALSE:
1671 # Already in set, probably fine
1672 pass
1673 elif addResponses[ i ] == main.ERROR:
1674 # Error in execution
1675 addResults = main.FALSE
1676 else:
1677 # unexpected result
1678 addResults = main.FALSE
1679 if addResults != main.TRUE:
1680 main.log.error( "Error executing set add" )
1681
1682 # Check if set is still correct
1683 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001684 getResponses = main.Cluster.command( "setTestGet",
1685 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001686 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001687 for i in range( len( main.Cluster.active() ) ):
1688 node = main.Cluster.active()[ i ]
Jon Hallf37d44d2017-05-24 10:37:30 -07001689 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001690 current = set( getResponses[ i ] )
1691 if len( current ) == len( getResponses[ i ] ):
1692 # no repeats
1693 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001694 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001695 " of set " + main.onosSetName + ":\n" +
1696 str( getResponses[ i ] ) )
1697 main.log.debug( "Expected: " + str( main.onosSet ) )
1698 main.log.debug( "Actual: " + str( current ) )
1699 getResults = main.FALSE
1700 else:
1701 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001702 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001703 " set " + main.onosSetName + ":\n" +
1704 str( getResponses[ i ] ) )
1705 getResults = main.FALSE
1706 elif getResponses[ i ] == main.ERROR:
1707 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001708 sizeResponses = main.Cluster.command( "setTestSize",
1709 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001710 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001711 for i in range( len( main.Cluster.active() ) ):
1712 node = main.Cluster.active()[ i ]
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001713 if size != sizeResponses[ i ]:
1714 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001715 main.log.error( node + " expected a size of " +
1716 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001717 " but got " + str( sizeResponses[ i ] ) )
1718 addResults = addResults and getResults and sizeResults
1719 utilities.assert_equals( expect=main.TRUE,
1720 actual=addResults,
1721 onpass="Set add correct",
1722 onfail="Set add was incorrect" )
1723
1724 main.step( "Distributed Set addAll()" )
1725 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001726 addResponses = main.Cluster.command( "setTestAdd",
1727 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001728 # main.TRUE = successfully changed the set
1729 # main.FALSE = action resulted in no change in set
1730 # main.ERROR - Some error in executing the function
1731 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001732 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001733 if addResponses[ i ] == main.TRUE:
1734 # All is well
1735 pass
1736 elif addResponses[ i ] == main.FALSE:
1737 # Already in set, probably fine
1738 pass
1739 elif addResponses[ i ] == main.ERROR:
1740 # Error in execution
1741 addAllResults = main.FALSE
1742 else:
1743 # unexpected result
1744 addAllResults = main.FALSE
1745 if addAllResults != main.TRUE:
1746 main.log.error( "Error executing set addAll" )
1747
1748 # Check if set is still correct
1749 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001750 getResponses = main.Cluster.command( "setTestGet",
1751 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001752 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001753 for i in range( len( main.Cluster.active() ) ):
1754 node = main.Cluster.active()[ i ]
Jon Hallf37d44d2017-05-24 10:37:30 -07001755 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001756 current = set( getResponses[ i ] )
1757 if len( current ) == len( getResponses[ i ] ):
1758 # no repeats
1759 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001760 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001761 " of set " + main.onosSetName + ":\n" +
1762 str( getResponses[ i ] ) )
1763 main.log.debug( "Expected: " + str( main.onosSet ) )
1764 main.log.debug( "Actual: " + str( current ) )
1765 getResults = main.FALSE
1766 else:
1767 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001768 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001769 " set " + main.onosSetName + ":\n" +
1770 str( getResponses[ i ] ) )
1771 getResults = main.FALSE
1772 elif getResponses[ i ] == main.ERROR:
1773 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001774 sizeResponses = main.Cluster.command( "setTestSize",
1775 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001776 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001777 for i in range( len( main.Cluster.active() ) ):
1778 node = main.Cluster.active()[ i ]
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001779 if size != sizeResponses[ i ]:
1780 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001781 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001782 " for set " + main.onosSetName +
1783 " but got " + str( sizeResponses[ i ] ) )
1784 addAllResults = addAllResults and getResults and sizeResults
1785 utilities.assert_equals( expect=main.TRUE,
1786 actual=addAllResults,
1787 onpass="Set addAll correct",
1788 onfail="Set addAll was incorrect" )
1789
1790 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001791 containsResponses = main.Cluster.command( "setTestGet",
1792 args=[ main.onosSetName ],
1793 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001794 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001795 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001796 if containsResponses[ i ] == main.ERROR:
1797 containsResults = main.FALSE
1798 else:
1799 containsResults = containsResults and\
1800 containsResponses[ i ][ 1 ]
1801 utilities.assert_equals( expect=main.TRUE,
1802 actual=containsResults,
1803 onpass="Set contains is functional",
1804 onfail="Set contains failed" )
1805
1806 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001807 containsAllResponses = main.Cluster.command( "setTestGet",
1808 args=[ main.onosSetName ],
1809 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001810 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001811 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001812 if containsResponses[ i ] == main.ERROR:
1813 containsResults = main.FALSE
1814 else:
1815 containsResults = containsResults and\
1816 containsResponses[ i ][ 1 ]
1817 utilities.assert_equals( expect=main.TRUE,
1818 actual=containsAllResults,
1819 onpass="Set containsAll is functional",
1820 onfail="Set containsAll failed" )
1821
1822 main.step( "Distributed Set remove()" )
1823 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001824 removeResponses = main.Cluster.command( "setTestRemove",
1825 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001826 # main.TRUE = successfully changed the set
1827 # main.FALSE = action resulted in no change in set
1828 # main.ERROR - Some error in executing the function
1829 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001830 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001831 if removeResponses[ i ] == main.TRUE:
1832 # All is well
1833 pass
1834 elif removeResponses[ i ] == main.FALSE:
1835 # not in set, probably fine
1836 pass
1837 elif removeResponses[ i ] == main.ERROR:
1838 # Error in execution
1839 removeResults = main.FALSE
1840 else:
1841 # unexpected result
1842 removeResults = main.FALSE
1843 if removeResults != main.TRUE:
1844 main.log.error( "Error executing set remove" )
1845
1846 # Check if set is still correct
1847 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001848 getResponses = main.Cluster.command( "setTestGet",
1849 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001850 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001851 for i in range( len( main.Cluster.active() ) ):
1852 node = main.Cluster.active()[ i ]
Jon Hallf37d44d2017-05-24 10:37:30 -07001853 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001854 current = set( getResponses[ i ] )
1855 if len( current ) == len( getResponses[ i ] ):
1856 # no repeats
1857 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001858 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001859 " of set " + main.onosSetName + ":\n" +
1860 str( getResponses[ i ] ) )
1861 main.log.debug( "Expected: " + str( main.onosSet ) )
1862 main.log.debug( "Actual: " + str( current ) )
1863 getResults = main.FALSE
1864 else:
1865 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001866 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001867 " set " + main.onosSetName + ":\n" +
1868 str( getResponses[ i ] ) )
1869 getResults = main.FALSE
1870 elif getResponses[ i ] == main.ERROR:
1871 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001872 sizeResponses = main.Cluster.command( "setTestSize",
1873 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001874 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001875 for i in range( len( main.Cluster.active() ) ):
1876 node = main.Cluster.active()[ i ]
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001877 if size != sizeResponses[ i ]:
1878 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001879 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001880 " for set " + main.onosSetName +
1881 " but got " + str( sizeResponses[ i ] ) )
1882 removeResults = removeResults and getResults and sizeResults
1883 utilities.assert_equals( expect=main.TRUE,
1884 actual=removeResults,
1885 onpass="Set remove correct",
1886 onfail="Set remove was incorrect" )
1887
1888 main.step( "Distributed Set removeAll()" )
1889 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001890 removeAllResponses = main.Cluster.command( "setTestRemove",
1891 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001892 # main.TRUE = successfully changed the set
1893 # main.FALSE = action resulted in no change in set
1894 # main.ERROR - Some error in executing the function
1895 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001896 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001897 if removeAllResponses[ i ] == main.TRUE:
1898 # All is well
1899 pass
1900 elif removeAllResponses[ i ] == main.FALSE:
1901 # not in set, probably fine
1902 pass
1903 elif removeAllResponses[ i ] == main.ERROR:
1904 # Error in execution
1905 removeAllResults = main.FALSE
1906 else:
1907 # unexpected result
1908 removeAllResults = main.FALSE
1909 if removeAllResults != main.TRUE:
1910 main.log.error( "Error executing set removeAll" )
1911
1912 # Check if set is still correct
1913 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001914 getResponses = main.Cluster.command( "setTestGet",
1915 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001916 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001917 for i in range( len( main.Cluster.active() ) ):
1918 node = main.Cluster.active()[ i ]
Jon Hallf37d44d2017-05-24 10:37:30 -07001919 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001920 current = set( getResponses[ i ] )
1921 if len( current ) == len( getResponses[ i ] ):
1922 # no repeats
1923 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001924 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001925 " of set " + main.onosSetName + ":\n" +
1926 str( getResponses[ i ] ) )
1927 main.log.debug( "Expected: " + str( main.onosSet ) )
1928 main.log.debug( "Actual: " + str( current ) )
1929 getResults = main.FALSE
1930 else:
1931 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001932 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001933 " set " + main.onosSetName + ":\n" +
1934 str( getResponses[ i ] ) )
1935 getResults = main.FALSE
1936 elif getResponses[ i ] == main.ERROR:
1937 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001938 sizeResponses = main.Cluster.command( "setTestSize",
1939 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001940 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001941 for i in range( len( main.Cluster.active() ) ):
1942 node = main.Cluster.active()[ i ]
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001943 if size != sizeResponses[ i ]:
1944 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001945 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001946 " for set " + main.onosSetName +
1947 " but got " + str( sizeResponses[ i ] ) )
1948 removeAllResults = removeAllResults and getResults and sizeResults
1949 utilities.assert_equals( expect=main.TRUE,
1950 actual=removeAllResults,
1951 onpass="Set removeAll correct",
1952 onfail="Set removeAll was incorrect" )
1953
1954 main.step( "Distributed Set addAll()" )
1955 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001956 addResponses = main.Cluster.command( "setTestAdd",
1957 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001958 # main.TRUE = successfully changed the set
1959 # main.FALSE = action resulted in no change in set
1960 # main.ERROR - Some error in executing the function
1961 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001962 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001963 if addResponses[ i ] == main.TRUE:
1964 # All is well
1965 pass
1966 elif addResponses[ i ] == main.FALSE:
1967 # Already in set, probably fine
1968 pass
1969 elif addResponses[ i ] == main.ERROR:
1970 # Error in execution
1971 addAllResults = main.FALSE
1972 else:
1973 # unexpected result
1974 addAllResults = main.FALSE
1975 if addAllResults != main.TRUE:
1976 main.log.error( "Error executing set addAll" )
1977
1978 # Check if set is still correct
1979 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001980 getResponses = main.Cluster.command( "setTestGet",
1981 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001982 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001983 for i in range( len( main.Cluster.active() ) ):
1984 node = main.Cluster.active()[ i ]
Jon Hallf37d44d2017-05-24 10:37:30 -07001985 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001986 current = set( getResponses[ i ] )
1987 if len( current ) == len( getResponses[ i ] ):
1988 # no repeats
1989 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001990 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001991 " of set " + main.onosSetName + ":\n" +
1992 str( getResponses[ i ] ) )
1993 main.log.debug( "Expected: " + str( main.onosSet ) )
1994 main.log.debug( "Actual: " + str( current ) )
1995 getResults = main.FALSE
1996 else:
1997 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001998 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001999 " set " + main.onosSetName + ":\n" +
2000 str( getResponses[ i ] ) )
2001 getResults = main.FALSE
2002 elif getResponses[ i ] == main.ERROR:
2003 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002004 sizeResponses = main.Cluster.command( "setTestSize",
2005 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002006 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002007 for i in range( len( main.Cluster.active() ) ):
2008 node = main.Cluster.active()[ i ]
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002009 if size != sizeResponses[ i ]:
2010 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002011 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002012 " for set " + main.onosSetName +
2013 " but got " + str( sizeResponses[ i ] ) )
2014 addAllResults = addAllResults and getResults and sizeResults
2015 utilities.assert_equals( expect=main.TRUE,
2016 actual=addAllResults,
2017 onpass="Set addAll correct",
2018 onfail="Set addAll was incorrect" )
2019
2020 main.step( "Distributed Set clear()" )
2021 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002022 clearResponses = main.Cluster.command( "setTestRemove",
2023 args=[ main.onosSetName, " " ], # Values doesn't matter
2024 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002025 # main.TRUE = successfully changed the set
2026 # main.FALSE = action resulted in no change in set
2027 # main.ERROR - Some error in executing the function
2028 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002029 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002030 if clearResponses[ i ] == main.TRUE:
2031 # All is well
2032 pass
2033 elif clearResponses[ i ] == main.FALSE:
2034 # Nothing set, probably fine
2035 pass
2036 elif clearResponses[ i ] == main.ERROR:
2037 # Error in execution
2038 clearResults = main.FALSE
2039 else:
2040 # unexpected result
2041 clearResults = main.FALSE
2042 if clearResults != main.TRUE:
2043 main.log.error( "Error executing set clear" )
2044
2045 # Check if set is still correct
2046 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002047 getResponses = main.Cluster.command( "setTestGet",
2048 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002049 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002050 for i in range( len( main.Cluster.active() ) ):
2051 node = main.Cluster.active()[ i ]
Jon Hallf37d44d2017-05-24 10:37:30 -07002052 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002053 current = set( getResponses[ i ] )
2054 if len( current ) == len( getResponses[ i ] ):
2055 # no repeats
2056 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002057 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002058 " of set " + main.onosSetName + ":\n" +
2059 str( getResponses[ i ] ) )
2060 main.log.debug( "Expected: " + str( main.onosSet ) )
2061 main.log.debug( "Actual: " + str( current ) )
2062 getResults = main.FALSE
2063 else:
2064 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002065 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002066 " set " + main.onosSetName + ":\n" +
2067 str( getResponses[ i ] ) )
2068 getResults = main.FALSE
2069 elif getResponses[ i ] == main.ERROR:
2070 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002071 sizeResponses = main.Cluster.command( "setTestSize",
2072 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002073 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002074 for i in range( len( main.Cluster.active() ) ):
2075 node = main.Cluster.active()[ i ]
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002076 if size != sizeResponses[ i ]:
2077 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002078 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002079 " for set " + main.onosSetName +
2080 " but got " + str( sizeResponses[ i ] ) )
2081 clearResults = clearResults and getResults and sizeResults
2082 utilities.assert_equals( expect=main.TRUE,
2083 actual=clearResults,
2084 onpass="Set clear correct",
2085 onfail="Set clear was incorrect" )
2086
2087 main.step( "Distributed Set addAll()" )
2088 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002089 addResponses = main.Cluster.command( "setTestAdd",
2090 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002091 # main.TRUE = successfully changed the set
2092 # main.FALSE = action resulted in no change in set
2093 # main.ERROR - Some error in executing the function
2094 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002095 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002096 if addResponses[ i ] == main.TRUE:
2097 # All is well
2098 pass
2099 elif addResponses[ i ] == main.FALSE:
2100 # Already in set, probably fine
2101 pass
2102 elif addResponses[ i ] == main.ERROR:
2103 # Error in execution
2104 addAllResults = main.FALSE
2105 else:
2106 # unexpected result
2107 addAllResults = main.FALSE
2108 if addAllResults != main.TRUE:
2109 main.log.error( "Error executing set addAll" )
2110
2111 # Check if set is still correct
2112 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002113 getResponses = main.Cluster.command( "setTestGet",
2114 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002115 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002116 for i in range( len( main.Cluster.active() ) ):
2117 node = main.Cluster.active()[ i ]
Jon Hallf37d44d2017-05-24 10:37:30 -07002118 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002119 current = set( getResponses[ i ] )
2120 if len( current ) == len( getResponses[ i ] ):
2121 # no repeats
2122 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002123 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002124 " of set " + main.onosSetName + ":\n" +
2125 str( getResponses[ i ] ) )
2126 main.log.debug( "Expected: " + str( main.onosSet ) )
2127 main.log.debug( "Actual: " + str( current ) )
2128 getResults = main.FALSE
2129 else:
2130 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002131 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002132 " set " + main.onosSetName + ":\n" +
2133 str( getResponses[ i ] ) )
2134 getResults = main.FALSE
2135 elif getResponses[ i ] == main.ERROR:
2136 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002137 sizeResponses = main.Cluster.command( "setTestSize",
2138 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002139 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002140 for i in range( len( main.Cluster.active() ) ):
2141 node = main.Cluster.active()[ i ]
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002142 if size != sizeResponses[ i ]:
2143 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002144 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002145 " for set " + main.onosSetName +
2146 " but got " + str( sizeResponses[ i ] ) )
2147 addAllResults = addAllResults and getResults and sizeResults
2148 utilities.assert_equals( expect=main.TRUE,
2149 actual=addAllResults,
2150 onpass="Set addAll correct",
2151 onfail="Set addAll was incorrect" )
2152
2153 main.step( "Distributed Set retain()" )
2154 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002155 retainResponses = main.Cluster.command( "setTestRemove",
2156 args=[ main.onosSetName, retainValue ],
2157 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002158 # main.TRUE = successfully changed the set
2159 # main.FALSE = action resulted in no change in set
2160 # main.ERROR - Some error in executing the function
2161 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002162 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002163 if retainResponses[ i ] == main.TRUE:
2164 # All is well
2165 pass
2166 elif retainResponses[ i ] == main.FALSE:
2167 # Already in set, probably fine
2168 pass
2169 elif retainResponses[ i ] == main.ERROR:
2170 # Error in execution
2171 retainResults = main.FALSE
2172 else:
2173 # unexpected result
2174 retainResults = main.FALSE
2175 if retainResults != main.TRUE:
2176 main.log.error( "Error executing set retain" )
2177
2178 # Check if set is still correct
2179 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002180 getResponses = main.Cluster.command( "setTestGet",
2181 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002182 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002183 for i in range( len( main.Cluster.active() ) ):
2184 node = main.Cluster.active()[ i ]
Jon Hallf37d44d2017-05-24 10:37:30 -07002185 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002186 current = set( getResponses[ i ] )
2187 if len( current ) == len( getResponses[ i ] ):
2188 # no repeats
2189 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002190 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002191 " of set " + main.onosSetName + ":\n" +
2192 str( getResponses[ i ] ) )
2193 main.log.debug( "Expected: " + str( main.onosSet ) )
2194 main.log.debug( "Actual: " + str( current ) )
2195 getResults = main.FALSE
2196 else:
2197 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002198 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002199 " set " + main.onosSetName + ":\n" +
2200 str( getResponses[ i ] ) )
2201 getResults = main.FALSE
2202 elif getResponses[ i ] == main.ERROR:
2203 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002204 sizeResponses = main.Cluster.command( "setTestSize",
2205 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002206 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002207 for i in range( len( main.Cluster.active() ) ):
2208 node = main.Cluster.active()[ i ]
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002209 if size != sizeResponses[ i ]:
2210 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002211 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002212 str( size ) + " for set " + main.onosSetName +
2213 " but got " + str( sizeResponses[ i ] ) )
2214 retainResults = retainResults and getResults and sizeResults
2215 utilities.assert_equals( expect=main.TRUE,
2216 actual=retainResults,
2217 onpass="Set retain correct",
2218 onfail="Set retain was incorrect" )
2219
2220 # Transactional maps
2221 main.step( "Partitioned Transactional maps put" )
2222 tMapValue = "Testing"
2223 numKeys = 100
2224 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002225 ctrl = main.Cluster.next()
2226 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002227 if putResponses and len( putResponses ) == 100:
2228 for i in putResponses:
2229 if putResponses[ i ][ 'value' ] != tMapValue:
2230 putResult = False
2231 else:
2232 putResult = False
2233 if not putResult:
2234 main.log.debug( "Put response values: " + str( putResponses ) )
2235 utilities.assert_equals( expect=True,
2236 actual=putResult,
2237 onpass="Partitioned Transactional Map put successful",
2238 onfail="Partitioned Transactional Map put values are incorrect" )
2239
2240 main.step( "Partitioned Transactional maps get" )
2241 # FIXME: is this sleep needed?
2242 time.sleep( 5 )
2243
2244 getCheck = True
2245 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002246 getResponses = main.Cluster.command( "transactionalMapGet",
2247 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002248 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002249 for node in getResponses:
2250 if node != tMapValue:
2251 valueCheck = False
2252 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002253 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002254 main.log.warn( getResponses )
2255 getCheck = getCheck and valueCheck
2256 utilities.assert_equals( expect=True,
2257 actual=getCheck,
2258 onpass="Partitioned Transactional Map get values were correct",
2259 onfail="Partitioned Transactional Map values incorrect" )
2260
2261 # DISTRIBUTED ATOMIC VALUE
2262 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002263 getValues = main.Cluster.command( "valueTestGet",
2264 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002265 main.log.debug( getValues )
2266 # Check the results
2267 atomicValueGetResult = True
2268 expected = valueValue if valueValue is not None else "null"
2269 main.log.debug( "Checking for value of " + expected )
2270 for i in getValues:
2271 if i != expected:
2272 atomicValueGetResult = False
2273 utilities.assert_equals( expect=True,
2274 actual=atomicValueGetResult,
2275 onpass="Atomic Value get successful",
2276 onfail="Error getting atomic Value " +
2277 str( valueValue ) + ", found: " +
2278 str( getValues ) )
2279
2280 main.step( "Atomic Value set()" )
2281 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002282 setValues = main.Cluster.command( "valueTestSet",
2283 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002284 main.log.debug( setValues )
2285 # Check the results
2286 atomicValueSetResults = True
2287 for i in setValues:
2288 if i != main.TRUE:
2289 atomicValueSetResults = False
2290 utilities.assert_equals( expect=True,
2291 actual=atomicValueSetResults,
2292 onpass="Atomic Value set successful",
2293 onfail="Error setting atomic Value" +
2294 str( setValues ) )
2295
2296 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002297 getValues = main.Cluster.command( "valueTestGet",
2298 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002299 main.log.debug( getValues )
2300 # Check the results
2301 atomicValueGetResult = True
2302 expected = valueValue if valueValue is not None else "null"
2303 main.log.debug( "Checking for value of " + expected )
2304 for i in getValues:
2305 if i != expected:
2306 atomicValueGetResult = False
2307 utilities.assert_equals( expect=True,
2308 actual=atomicValueGetResult,
2309 onpass="Atomic Value get successful",
2310 onfail="Error getting atomic Value " +
2311 str( valueValue ) + ", found: " +
2312 str( getValues ) )
2313
2314 main.step( "Atomic Value compareAndSet()" )
2315 oldValue = valueValue
2316 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002317 ctrl = main.Cluster.next()
2318 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002319 main.log.debug( CASValue )
2320 utilities.assert_equals( expect=main.TRUE,
2321 actual=CASValue,
2322 onpass="Atomic Value comapreAndSet successful",
2323 onfail="Error setting atomic Value:" +
2324 str( CASValue ) )
2325
2326 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002327 getValues = main.Cluster.command( "valueTestGet",
2328 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002329 main.log.debug( getValues )
2330 # Check the results
2331 atomicValueGetResult = True
2332 expected = valueValue if valueValue is not None else "null"
2333 main.log.debug( "Checking for value of " + expected )
2334 for i in getValues:
2335 if i != expected:
2336 atomicValueGetResult = False
2337 utilities.assert_equals( expect=True,
2338 actual=atomicValueGetResult,
2339 onpass="Atomic Value get successful",
2340 onfail="Error getting atomic Value " +
2341 str( valueValue ) + ", found: " +
2342 str( getValues ) )
2343
2344 main.step( "Atomic Value getAndSet()" )
2345 oldValue = valueValue
2346 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002347 ctrl = main.Cluster.next()
2348 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002349 main.log.debug( GASValue )
2350 expected = oldValue if oldValue is not None else "null"
2351 utilities.assert_equals( expect=expected,
2352 actual=GASValue,
2353 onpass="Atomic Value GAS successful",
2354 onfail="Error with GetAndSet atomic Value: expected " +
2355 str( expected ) + ", found: " +
2356 str( GASValue ) )
2357
2358 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002359 getValues = main.Cluster.command( "valueTestGet",
2360 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002361 main.log.debug( getValues )
2362 # Check the results
2363 atomicValueGetResult = True
2364 expected = valueValue if valueValue is not None else "null"
2365 main.log.debug( "Checking for value of " + expected )
2366 for i in getValues:
2367 if i != expected:
2368 atomicValueGetResult = False
2369 utilities.assert_equals( expect=True,
2370 actual=atomicValueGetResult,
2371 onpass="Atomic Value get successful",
2372 onfail="Error getting atomic Value: expected " +
2373 str( valueValue ) + ", found: " +
2374 str( getValues ) )
2375
2376 main.step( "Atomic Value destory()" )
2377 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002378 ctrl = main.Cluster.next()
2379 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002380 main.log.debug( destroyResult )
2381 # Check the results
2382 utilities.assert_equals( expect=main.TRUE,
2383 actual=destroyResult,
2384 onpass="Atomic Value destroy successful",
2385 onfail="Error destroying atomic Value" )
2386
2387 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002388 getValues = main.Cluster.command( "valueTestGet",
2389 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002390 main.log.debug( getValues )
2391 # Check the results
2392 atomicValueGetResult = True
2393 expected = valueValue if valueValue is not None else "null"
2394 main.log.debug( "Checking for value of " + expected )
2395 for i in getValues:
2396 if i != expected:
2397 atomicValueGetResult = False
2398 utilities.assert_equals( expect=True,
2399 actual=atomicValueGetResult,
2400 onpass="Atomic Value get successful",
2401 onfail="Error getting atomic Value " +
2402 str( valueValue ) + ", found: " +
2403 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002404
2405 # WORK QUEUES
2406 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002407 ctrl = main.Cluster.next()
2408 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002409 workQueuePending += 1
2410 main.log.debug( addResult )
2411 # Check the results
2412 utilities.assert_equals( expect=main.TRUE,
2413 actual=addResult,
2414 onpass="Work Queue add successful",
2415 onfail="Error adding to Work Queue" )
2416
2417 main.step( "Check the work queue stats" )
2418 statsResults = self.workQueueStatsCheck( workQueueName,
2419 workQueueCompleted,
2420 workQueueInProgress,
2421 workQueuePending )
2422 utilities.assert_equals( expect=True,
2423 actual=statsResults,
2424 onpass="Work Queue stats correct",
2425 onfail="Work Queue stats incorrect " )
2426
2427 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002428 ctrl = main.Cluster.next()
2429 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002430 workQueuePending += 2
2431 main.log.debug( addMultipleResult )
2432 # Check the results
2433 utilities.assert_equals( expect=main.TRUE,
2434 actual=addMultipleResult,
2435 onpass="Work Queue add multiple successful",
2436 onfail="Error adding multiple items to Work Queue" )
2437
2438 main.step( "Check the work queue stats" )
2439 statsResults = self.workQueueStatsCheck( workQueueName,
2440 workQueueCompleted,
2441 workQueueInProgress,
2442 workQueuePending )
2443 utilities.assert_equals( expect=True,
2444 actual=statsResults,
2445 onpass="Work Queue stats correct",
2446 onfail="Work Queue stats incorrect " )
2447
2448 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002449 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002450 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002451 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002452 workQueuePending -= number
2453 workQueueCompleted += number
2454 main.log.debug( take1Result )
2455 # Check the results
2456 utilities.assert_equals( expect=main.TRUE,
2457 actual=take1Result,
2458 onpass="Work Queue takeAndComplete 1 successful",
2459 onfail="Error taking 1 from Work Queue" )
2460
2461 main.step( "Check the work queue stats" )
2462 statsResults = self.workQueueStatsCheck( workQueueName,
2463 workQueueCompleted,
2464 workQueueInProgress,
2465 workQueuePending )
2466 utilities.assert_equals( expect=True,
2467 actual=statsResults,
2468 onpass="Work Queue stats correct",
2469 onfail="Work Queue stats incorrect " )
2470
2471 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002472 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002473 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002474 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002475 workQueuePending -= number
2476 workQueueCompleted += number
2477 main.log.debug( take2Result )
2478 # Check the results
2479 utilities.assert_equals( expect=main.TRUE,
2480 actual=take2Result,
2481 onpass="Work Queue takeAndComplete 2 successful",
2482 onfail="Error taking 2 from Work Queue" )
2483
2484 main.step( "Check the work queue stats" )
2485 statsResults = self.workQueueStatsCheck( workQueueName,
2486 workQueueCompleted,
2487 workQueueInProgress,
2488 workQueuePending )
2489 utilities.assert_equals( expect=True,
2490 actual=statsResults,
2491 onpass="Work Queue stats correct",
2492 onfail="Work Queue stats incorrect " )
2493
2494 main.step( "Work Queue destroy()" )
2495 valueValue = None
2496 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002497 ctrl = main.Cluster.next()
2498 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002499 workQueueCompleted = 0
2500 workQueueInProgress = 0
2501 workQueuePending = 0
2502 main.log.debug( destroyResult )
2503 # Check the results
2504 utilities.assert_equals( expect=main.TRUE,
2505 actual=destroyResult,
2506 onpass="Work Queue destroy successful",
2507 onfail="Error destroying Work Queue" )
2508
2509 main.step( "Check the work queue stats" )
2510 statsResults = self.workQueueStatsCheck( workQueueName,
2511 workQueueCompleted,
2512 workQueueInProgress,
2513 workQueuePending )
2514 utilities.assert_equals( expect=True,
2515 actual=statsResults,
2516 onpass="Work Queue stats correct",
2517 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002518 except Exception as e:
2519 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002520
2521 def cleanUp( self, main ):
2522 """
2523 Clean up
2524 """
2525 import os
2526 import time
2527 assert main.numCtrls, "main.numCtrls not defined"
2528 assert main, "main not defined"
2529 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002530
2531 # printing colors to terminal
2532 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2533 'blue': '\033[94m', 'green': '\033[92m',
2534 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2535 main.case( "Test Cleanup" )
2536 main.step( "Killing tcpdumps" )
2537 main.Mininet2.stopTcpdump()
2538
2539 testname = main.TEST
2540 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2541 main.step( "Copying MN pcap and ONOS log files to test station" )
2542 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2543 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2544 # NOTE: MN Pcap file is being saved to logdir.
2545 # We scp this file as MN and TestON aren't necessarily the same vm
2546
2547 # FIXME: To be replaced with a Jenkin's post script
2548 # TODO: Load these from params
2549 # NOTE: must end in /
2550 logFolder = "/opt/onos/log/"
2551 logFiles = [ "karaf.log", "karaf.log.1" ]
2552 # NOTE: must end in /
2553 for f in logFiles:
Jon Hallca319892017-06-15 15:25:22 -07002554 for ctrl in main.Cluster.controllers:
2555 dstName = main.logdir + "/" + ctrl.name + "-" + f
2556 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002557 logFolder + f, dstName )
2558 # std*.log's
2559 # NOTE: must end in /
2560 logFolder = "/opt/onos/var/"
2561 logFiles = [ "stderr.log", "stdout.log" ]
2562 # NOTE: must end in /
2563 for f in logFiles:
Jon Hallca319892017-06-15 15:25:22 -07002564 for ctrl in main.Cluster.controllers:
2565 dstName = main.logdir + "/" + ctrl.name + "-" + f
2566 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002567 logFolder + f, dstName )
2568 else:
2569 main.log.debug( "skipping saving log files" )
2570
2571 main.step( "Stopping Mininet" )
2572 mnResult = main.Mininet1.stopNet()
2573 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2574 onpass="Mininet stopped",
2575 onfail="MN cleanup NOT successful" )
2576
2577 main.step( "Checking ONOS Logs for errors" )
Jon Hallca319892017-06-15 15:25:22 -07002578 for ctrl in main.Cluster.controllers:
2579 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2580 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002581
2582 try:
2583 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2584 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2585 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2586 timerLog.close()
2587 except NameError as e:
2588 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002589
    def assignMastership( self, main ):
        """
        Manually assign mastership of each switch to a designated ONOS
        controller via the 'device-role' mechanism, then verify that every
        assignment took effect.

        The switch-to-controller mapping below was laid out for a 7 node
        cluster; each controller index is taken modulo main.numCtrls so
        smaller clusters still map to a valid node.

        Arguments:
            main - the TestON main object holding components and parameters
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        # Parallel lists: ipList[ i ] is the controller assigned to
        # deviceList[ i ]; reused below to verify the assignments.
        ipList = []
        deviceList = []
        onosCli = main.Cluster.next()
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluser, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.Cluster.active()[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    # NOTE(review): unreachable for the fixed range( 1, 29 );
                    # if it ever ran, ip/deviceId would keep stale values from
                    # the previous iteration before the calls below — confirm
                    # intended before widening the switch range.
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice() returned None/non-dict, or the deviceId assert fired
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002702
Devin Lim58046fa2017-07-05 16:55:00 -07002703 def bringUpStoppedNode( self, main ):
2704 """
2705 The bring up stopped nodes
2706 """
2707 import time
2708 assert main.numCtrls, "main.numCtrls not defined"
2709 assert main, "main not defined"
2710 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002711 assert main.kill, "main.kill not defined"
2712 main.case( "Restart minority of ONOS nodes" )
2713
2714 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2715 startResults = main.TRUE
2716 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002717 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002718 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002719 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002720 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2721 onpass="ONOS nodes started successfully",
2722 onfail="ONOS nodes NOT successfully started" )
2723
2724 main.step( "Checking if ONOS is up yet" )
2725 count = 0
2726 onosIsupResult = main.FALSE
2727 while onosIsupResult == main.FALSE and count < 10:
2728 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002729 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002730 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002731 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002732 count = count + 1
2733 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2734 onpass="ONOS restarted successfully",
2735 onfail="ONOS restart NOT successful" )
2736
Jon Hallca319892017-06-15 15:25:22 -07002737 main.step( "Restarting ONOS nodes" )
Devin Lim58046fa2017-07-05 16:55:00 -07002738 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002739 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002740 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002741 ctrl.startOnosCli( ctrl.ipAddress )
2742 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002743 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002744 onpass="ONOS node(s) restarted",
2745 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002746
2747 # Grab the time of restart so we chan check how long the gossip
2748 # protocol has had time to work
2749 main.restartTime = time.time() - restartTime
2750 main.log.debug( "Restart time: " + str( main.restartTime ) )
2751 # TODO: MAke this configurable. Also, we are breaking the above timer
2752 main.step( "Checking ONOS nodes" )
2753 nodeResults = utilities.retry( self.nodesCheck,
2754 False,
Jon Hallca319892017-06-15 15:25:22 -07002755 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07002756 sleep=15,
2757 attempts=5 )
2758
2759 utilities.assert_equals( expect=True, actual=nodeResults,
2760 onpass="Nodes check successful",
2761 onfail="Nodes check NOT successful" )
2762
2763 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002764 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002765 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002766 ctrl.name,
2767 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002768 main.log.error( "Failed to start ONOS, stopping test" )
2769 main.cleanup()
2770 main.exit()
2771
Jon Hallca319892017-06-15 15:25:22 -07002772 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002773
2774 main.step( "Rerun for election on the node(s) that were killed" )
2775 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002776 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002777 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002778 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002779 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2780 onpass="ONOS nodes reran for election topic",
2781 onfail="Errror rerunning for election" )
2782
Devin Lim58046fa2017-07-05 16:55:00 -07002783 def checkStateAfterONOS( self, main, afterWhich, compareSwitch=False, isRestart=False ):
2784 """
2785 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002786 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002787 1: scaling
2788 """
2789 """
2790 Check state after ONOS failure/scaling
2791 """
2792 import json
2793 assert main.numCtrls, "main.numCtrls not defined"
2794 assert main, "main not defined"
2795 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002796 main.case( "Running ONOS Constant State Tests" )
2797
2798 OnosAfterWhich = [ "failure" , "scaliing" ]
2799
2800 main.step( "Check that each switch has a master" )
2801 # Assert that each device has a master
Jon Hallca319892017-06-15 15:25:22 -07002802 rolesNotNull = all( [ i == main.TRUE for i in main.Cluster.command( "rolesNotNull" ) ] )
Devin Lim58046fa2017-07-05 16:55:00 -07002803 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07002804 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07002805 actual=rolesNotNull,
2806 onpass="Each device has a master",
2807 onfail="Some devices don't have a master assigned" )
2808
2809 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -07002810 ONOSMastership = main.Cluster.command( "roles" )
2811 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002812 consistentMastership = True
2813 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07002814 for i in range( len( ONOSMastership ) ):
Jon Hallca319892017-06-15 15:25:22 -07002815 node = str( main.Cluster.active()[ i ] )
Devin Lim58046fa2017-07-05 16:55:00 -07002816 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07002817 main.log.error( "Error in getting " + node + " roles" )
2818 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07002819 repr( ONOSMastership[ i ] ) )
2820 rolesResults = False
2821 utilities.assert_equals(
2822 expect=True,
2823 actual=rolesResults,
2824 onpass="No error in reading roles output",
2825 onfail="Error in reading roles from ONOS" )
2826
2827 main.step( "Check for consistency in roles from each controller" )
2828 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
2829 main.log.info(
2830 "Switch roles are consistent across all ONOS nodes" )
2831 else:
2832 consistentMastership = False
2833 utilities.assert_equals(
2834 expect=True,
2835 actual=consistentMastership,
2836 onpass="Switch roles are consistent across all ONOS nodes",
2837 onfail="ONOS nodes have different views of switch roles" )
2838
2839 if rolesResults and not consistentMastership:
2840 for i in range( len( ONOSMastership ) ):
Jon Hallca319892017-06-15 15:25:22 -07002841 node = str( main.Cluster.active()[ i ] )
2842 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002843 json.dumps( json.loads( ONOSMastership[ i ] ),
2844 sort_keys=True,
2845 indent=4,
2846 separators=( ',', ': ' ) ) )
2847
2848 if compareSwitch:
2849 description2 = "Compare switch roles from before failure"
2850 main.step( description2 )
2851 try:
2852 currentJson = json.loads( ONOSMastership[ 0 ] )
2853 oldJson = json.loads( mastershipState )
2854 except ( ValueError, TypeError ):
2855 main.log.exception( "Something is wrong with parsing " +
2856 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002857 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2858 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002859 main.cleanup()
2860 main.exit()
2861 mastershipCheck = main.TRUE
2862 for i in range( 1, 29 ):
2863 switchDPID = str(
2864 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2865 current = [ switch[ 'master' ] for switch in currentJson
2866 if switchDPID in switch[ 'id' ] ]
2867 old = [ switch[ 'master' ] for switch in oldJson
2868 if switchDPID in switch[ 'id' ] ]
2869 if current == old:
2870 mastershipCheck = mastershipCheck and main.TRUE
2871 else:
2872 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2873 mastershipCheck = main.FALSE
2874 utilities.assert_equals(
2875 expect=main.TRUE,
2876 actual=mastershipCheck,
2877 onpass="Mastership of Switches was not changed",
2878 onfail="Mastership of some switches changed" )
2879
2880 # NOTE: we expect mastership to change on controller failure/scaling down
2881 main.step( "Get the intents and compare across all nodes" )
Jon Hallca319892017-06-15 15:25:22 -07002882 ONOSIntents = main.Cluster.command( "intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07002883 intentCheck = main.FALSE
2884 consistentIntents = True
2885 intentsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07002886 for i in range( len( ONOSIntents ) ):
Devin Lim58046fa2017-07-05 16:55:00 -07002887 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07002888 ctrl = main.Cluster.active()[ i ]
2889 main.log.error( "Error in getting " + ctrl.name + " intents" )
2890 main.log.warn( ctrl.name + " intents response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07002891 repr( ONOSIntents[ i ] ) )
2892 intentsResults = False
2893 utilities.assert_equals(
2894 expect=True,
2895 actual=intentsResults,
2896 onpass="No error in reading intents output",
2897 onfail="Error in reading intents from ONOS" )
2898
2899 main.step( "Check for consistency in Intents from each controller" )
2900 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2901 main.log.info( "Intents are consistent across all ONOS " +
2902 "nodes" )
2903 else:
2904 consistentIntents = False
2905
2906 # Try to make it easy to figure out what is happening
2907 #
2908 # Intent ONOS1 ONOS2 ...
2909 # 0x01 INSTALLED INSTALLING
2910 # ... ... ...
2911 # ... ... ...
2912 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002913 for ctrl in main.Cluster.active():
2914 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002915 main.log.warn( title )
2916 # get all intent keys in the cluster
2917 keys = []
2918 for nodeStr in ONOSIntents:
2919 node = json.loads( nodeStr )
2920 for intent in node:
2921 keys.append( intent.get( 'id' ) )
2922 keys = set( keys )
2923 for key in keys:
2924 row = "%-13s" % key
2925 for nodeStr in ONOSIntents:
2926 node = json.loads( nodeStr )
2927 for intent in node:
2928 if intent.get( 'id' ) == key:
2929 row += "%-15s" % intent.get( 'state' )
2930 main.log.warn( row )
2931 # End table view
2932
2933 utilities.assert_equals(
2934 expect=True,
2935 actual=consistentIntents,
2936 onpass="Intents are consistent across all ONOS nodes",
2937 onfail="ONOS nodes have different views of intents" )
2938 intentStates = []
2939 for node in ONOSIntents: # Iter through ONOS nodes
2940 nodeStates = []
2941 # Iter through intents of a node
2942 try:
2943 for intent in json.loads( node ):
2944 nodeStates.append( intent[ 'state' ] )
2945 except ( ValueError, TypeError ):
2946 main.log.exception( "Error in parsing intents" )
2947 main.log.error( repr( node ) )
2948 intentStates.append( nodeStates )
2949 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2950 main.log.info( dict( out ) )
2951
2952 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002953 for i in range( len( main.Cluster.active() ) ):
2954 ctrl = main.Cluster.contoller[ i ]
2955 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002956 main.log.warn( json.dumps(
2957 json.loads( ONOSIntents[ i ] ),
2958 sort_keys=True,
2959 indent=4,
2960 separators=( ',', ': ' ) ) )
2961 elif intentsResults and consistentIntents:
2962 intentCheck = main.TRUE
2963
2964 # NOTE: Store has no durability, so intents are lost across system
2965 # restarts
2966 if not isRestart:
2967 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2968 # NOTE: this requires case 5 to pass for intentState to be set.
2969 # maybe we should stop the test if that fails?
2970 sameIntents = main.FALSE
2971 try:
2972 intentState
2973 except NameError:
2974 main.log.warn( "No previous intent state was saved" )
2975 else:
2976 if intentState and intentState == ONOSIntents[ 0 ]:
2977 sameIntents = main.TRUE
2978 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2979 # TODO: possibly the states have changed? we may need to figure out
2980 # what the acceptable states are
2981 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2982 sameIntents = main.TRUE
2983 try:
2984 before = json.loads( intentState )
2985 after = json.loads( ONOSIntents[ 0 ] )
2986 for intent in before:
2987 if intent not in after:
2988 sameIntents = main.FALSE
2989 main.log.debug( "Intent is not currently in ONOS " +
2990 "(at least in the same form):" )
2991 main.log.debug( json.dumps( intent ) )
2992 except ( ValueError, TypeError ):
2993 main.log.exception( "Exception printing intents" )
2994 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2995 main.log.debug( repr( intentState ) )
2996 if sameIntents == main.FALSE:
2997 try:
2998 main.log.debug( "ONOS intents before: " )
2999 main.log.debug( json.dumps( json.loads( intentState ),
3000 sort_keys=True, indent=4,
3001 separators=( ',', ': ' ) ) )
3002 main.log.debug( "Current ONOS intents: " )
3003 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
3004 sort_keys=True, indent=4,
3005 separators=( ',', ': ' ) ) )
3006 except ( ValueError, TypeError ):
3007 main.log.exception( "Exception printing intents" )
3008 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3009 main.log.debug( repr( intentState ) )
3010 utilities.assert_equals(
3011 expect=main.TRUE,
3012 actual=sameIntents,
3013 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ] ,
3014 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
3015 intentCheck = intentCheck and sameIntents
3016
3017 main.step( "Get the OF Table entries and compare to before " +
3018 "component " + OnosAfterWhich[ afterWhich ] )
3019 FlowTables = main.TRUE
3020 for i in range( 28 ):
3021 main.log.info( "Checking flow table on s" + str( i + 1 ) )
3022 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
3023 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
3024 FlowTables = FlowTables and curSwitch
3025 if curSwitch == main.FALSE:
3026 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
3027 utilities.assert_equals(
3028 expect=main.TRUE,
3029 actual=FlowTables,
3030 onpass="No changes were found in the flow tables",
3031 onfail="Changes were found in the flow tables" )
3032
Jon Hallca319892017-06-15 15:25:22 -07003033 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003034 """
3035 main.step( "Check the continuous pings to ensure that no packets " +
3036 "were dropped during component failure" )
3037 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3038 main.params[ 'TESTONIP' ] )
3039 LossInPings = main.FALSE
3040 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3041 for i in range( 8, 18 ):
3042 main.log.info(
3043 "Checking for a loss in pings along flow from s" +
3044 str( i ) )
3045 LossInPings = main.Mininet2.checkForLoss(
3046 "/tmp/ping.h" +
3047 str( i ) ) or LossInPings
3048 if LossInPings == main.TRUE:
3049 main.log.info( "Loss in ping detected" )
3050 elif LossInPings == main.ERROR:
3051 main.log.info( "There are multiple mininet process running" )
3052 elif LossInPings == main.FALSE:
3053 main.log.info( "No Loss in the pings" )
3054 main.log.info( "No loss of dataplane connectivity" )
3055 utilities.assert_equals(
3056 expect=main.FALSE,
3057 actual=LossInPings,
3058 onpass="No Loss of connectivity",
3059 onfail="Loss of dataplane connectivity detected" )
3060 # NOTE: Since intents are not persisted with IntnentStore,
3061 # we expect loss in dataplane connectivity
3062 LossInPings = main.FALSE
3063 """
3064
3065 def compareTopo( self, main ):
3066 """
3067 Compare topo
3068 """
3069 import json
3070 import time
3071 assert main.numCtrls, "main.numCtrls not defined"
3072 assert main, "main not defined"
3073 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003074 try:
3075 from tests.dependencies.topology import Topology
3076 except ImportError:
3077 main.log.error( "Topology not found exiting the test" )
3078 main.exit()
3079 try:
3080 main.topoRelated
3081 except ( NameError, AttributeError ):
3082 main.topoRelated = Topology()
3083 main.case( "Compare ONOS Topology view to Mininet topology" )
3084 main.caseExplanation = "Compare topology objects between Mininet" +\
3085 " and ONOS"
3086 topoResult = main.FALSE
3087 topoFailMsg = "ONOS topology don't match Mininet"
3088 elapsed = 0
3089 count = 0
3090 main.step( "Comparing ONOS topology to MN topology" )
3091 startTime = time.time()
3092 # Give time for Gossip to work
3093 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3094 devicesResults = main.TRUE
3095 linksResults = main.TRUE
3096 hostsResults = main.TRUE
3097 hostAttachmentResults = True
3098 count += 1
3099 cliStart = time.time()
Jon Hallca319892017-06-15 15:25:22 -07003100 devices = main.topoRelated.getAllDevices( main.Cluster.active(), True,
3101 kwargs={ 'sleep': 5, 'attempts': 5,
3102 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003103 ipResult = main.TRUE
3104
Jon Hallca319892017-06-15 15:25:22 -07003105 hosts = main.topoRelated.getAllHosts( main.Cluster.active(), True,
3106 kwargs={ 'sleep': 5, 'attempts': 5,
3107 'randomTime': True },
3108 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003109
3110 for controller in range( 0, len( hosts ) ):
Jon Hallca319892017-06-15 15:25:22 -07003111 controllerStr = str( main.Cluster.active()[ controller ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003112 if hosts[ controller ]:
3113 for host in hosts[ controller ]:
3114 if host is None or host.get( 'ipAddresses', [] ) == []:
3115 main.log.error(
3116 "Error with host ipAddresses on controller" +
3117 controllerStr + ": " + str( host ) )
3118 ipResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003119 ports = main.topoRelated.getAllPorts( main.Cluster.active() , True,
3120 kwargs={ 'sleep': 5, 'attempts': 5,
3121 'randomTime': True } )
3122 links = main.topoRelated.getAllLinks( main.Cluster.active(), True,
3123 kwargs={ 'sleep': 5, 'attempts': 5,
3124 'randomTime': True } )
3125 clusters = main.topoRelated.getAllClusters( main.Cluster.active(), True,
3126 kwargs={ 'sleep': 5, 'attempts': 5,
3127 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003128
3129 elapsed = time.time() - startTime
3130 cliTime = time.time() - cliStart
3131 print "Elapsed time: " + str( elapsed )
3132 print "CLI time: " + str( cliTime )
3133
3134 if all( e is None for e in devices ) and\
3135 all( e is None for e in hosts ) and\
3136 all( e is None for e in ports ) and\
3137 all( e is None for e in links ) and\
3138 all( e is None for e in clusters ):
3139 topoFailMsg = "Could not get topology from ONOS"
3140 main.log.error( topoFailMsg )
3141 continue # Try again, No use trying to compare
3142
3143 mnSwitches = main.Mininet1.getSwitches()
3144 mnLinks = main.Mininet1.getLinks()
3145 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003146 for controller in range( len( main.Cluster.active() ) ):
3147 controllerStr = str( main.Cluster.active()[ controller ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003148 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1, controller,
3149 mnSwitches,
3150 devices, ports )
3151 utilities.assert_equals( expect=main.TRUE,
3152 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003153 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003154 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003155 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003156 " Switches view is incorrect" )
3157
3158
3159 currentLinksResult = main.topoRelated.compareBase( links, controller,
3160 main.Mininet1.compareLinks,
3161 [mnSwitches, mnLinks] )
3162 utilities.assert_equals( expect=main.TRUE,
3163 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003164 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003165 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003166 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003167 " links view is incorrect" )
3168 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3169 currentHostsResult = main.Mininet1.compareHosts(
3170 mnHosts,
3171 hosts[ controller ] )
3172 elif hosts[ controller ] == []:
3173 currentHostsResult = main.TRUE
3174 else:
3175 currentHostsResult = main.FALSE
3176 utilities.assert_equals( expect=main.TRUE,
3177 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003178 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003179 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003180 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003181 " hosts don't match Mininet" )
3182 # CHECKING HOST ATTACHMENT POINTS
3183 hostAttachment = True
3184 zeroHosts = False
3185 # FIXME: topo-HA/obelisk specific mappings:
3186 # key is mac and value is dpid
3187 mappings = {}
3188 for i in range( 1, 29 ): # hosts 1 through 28
3189 # set up correct variables:
3190 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3191 if i == 1:
3192 deviceId = "1000".zfill( 16 )
3193 elif i == 2:
3194 deviceId = "2000".zfill( 16 )
3195 elif i == 3:
3196 deviceId = "3000".zfill( 16 )
3197 elif i == 4:
3198 deviceId = "3004".zfill( 16 )
3199 elif i == 5:
3200 deviceId = "5000".zfill( 16 )
3201 elif i == 6:
3202 deviceId = "6000".zfill( 16 )
3203 elif i == 7:
3204 deviceId = "6007".zfill( 16 )
3205 elif i >= 8 and i <= 17:
3206 dpid = '3' + str( i ).zfill( 3 )
3207 deviceId = dpid.zfill( 16 )
3208 elif i >= 18 and i <= 27:
3209 dpid = '6' + str( i ).zfill( 3 )
3210 deviceId = dpid.zfill( 16 )
3211 elif i == 28:
3212 deviceId = "2800".zfill( 16 )
3213 mappings[ macId ] = deviceId
3214 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3215 if hosts[ controller ] == []:
3216 main.log.warn( "There are no hosts discovered" )
3217 zeroHosts = True
3218 else:
3219 for host in hosts[ controller ]:
3220 mac = None
3221 location = None
3222 device = None
3223 port = None
3224 try:
3225 mac = host.get( 'mac' )
3226 assert mac, "mac field could not be found for this host object"
3227
3228 location = host.get( 'locations' )[ 0 ]
3229 assert location, "location field could not be found for this host object"
3230
3231 # Trim the protocol identifier off deviceId
3232 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3233 assert device, "elementId field could not be found for this host location object"
3234
3235 port = location.get( 'port' )
3236 assert port, "port field could not be found for this host location object"
3237
3238 # Now check if this matches where they should be
3239 if mac and device and port:
3240 if str( port ) != "1":
3241 main.log.error( "The attachment port is incorrect for " +
3242 "host " + str( mac ) +
3243 ". Expected: 1 Actual: " + str( port ) )
3244 hostAttachment = False
3245 if device != mappings[ str( mac ) ]:
3246 main.log.error( "The attachment device is incorrect for " +
3247 "host " + str( mac ) +
3248 ". Expected: " + mappings[ str( mac ) ] +
3249 " Actual: " + device )
3250 hostAttachment = False
3251 else:
3252 hostAttachment = False
3253 except AssertionError:
3254 main.log.exception( "Json object not as expected" )
3255 main.log.error( repr( host ) )
3256 hostAttachment = False
3257 else:
3258 main.log.error( "No hosts json output or \"Error\"" +
3259 " in output. hosts = " +
3260 repr( hosts[ controller ] ) )
3261 if zeroHosts is False:
3262 # TODO: Find a way to know if there should be hosts in a
3263 # given point of the test
3264 hostAttachment = True
3265
3266 # END CHECKING HOST ATTACHMENT POINTS
3267 devicesResults = devicesResults and currentDevicesResult
3268 linksResults = linksResults and currentLinksResult
3269 hostsResults = hostsResults and currentHostsResult
3270 hostAttachmentResults = hostAttachmentResults and\
3271 hostAttachment
3272 topoResult = ( devicesResults and linksResults
3273 and hostsResults and ipResult and
3274 hostAttachmentResults )
3275 utilities.assert_equals( expect=True,
3276 actual=topoResult,
3277 onpass="ONOS topology matches Mininet",
3278 onfail=topoFailMsg )
3279 # End of While loop to pull ONOS state
3280
3281 # Compare json objects for hosts and dataplane clusters
3282
3283 # hosts
3284 main.step( "Hosts view is consistent across all ONOS nodes" )
3285 consistentHostsResult = main.TRUE
3286 for controller in range( len( hosts ) ):
Jon Hallca319892017-06-15 15:25:22 -07003287 controllerStr = str( main.Cluster.active()[ controller ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003288 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3289 if hosts[ controller ] == hosts[ 0 ]:
3290 continue
3291 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003292 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003293 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003294 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003295 consistentHostsResult = main.FALSE
3296
3297 else:
Jon Hallca319892017-06-15 15:25:22 -07003298 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003299 controllerStr )
3300 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003301 main.log.debug( controllerStr +
3302 " hosts response: " +
3303 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003304 utilities.assert_equals(
3305 expect=main.TRUE,
3306 actual=consistentHostsResult,
3307 onpass="Hosts view is consistent across all ONOS nodes",
3308 onfail="ONOS nodes have different views of hosts" )
3309
3310 main.step( "Hosts information is correct" )
3311 hostsResults = hostsResults and ipResult
3312 utilities.assert_equals(
3313 expect=main.TRUE,
3314 actual=hostsResults,
3315 onpass="Host information is correct",
3316 onfail="Host information is incorrect" )
3317
3318 main.step( "Host attachment points to the network" )
3319 utilities.assert_equals(
3320 expect=True,
3321 actual=hostAttachmentResults,
3322 onpass="Hosts are correctly attached to the network",
3323 onfail="ONOS did not correctly attach hosts to the network" )
3324
3325 # Strongly connected clusters of devices
3326 main.step( "Clusters view is consistent across all ONOS nodes" )
3327 consistentClustersResult = main.TRUE
3328 for controller in range( len( clusters ) ):
Jon Hallca319892017-06-15 15:25:22 -07003329 controllerStr = str( main.Cluster.active()[ controller ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003330 if "Error" not in clusters[ controller ]:
3331 if clusters[ controller ] == clusters[ 0 ]:
3332 continue
3333 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003334 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003335 controllerStr +
3336 " is inconsistent with ONOS1" )
3337 consistentClustersResult = main.FALSE
3338 else:
3339 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003340 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003341 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003342 main.log.debug( controllerStr +
3343 " clusters response: " +
3344 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003345 utilities.assert_equals(
3346 expect=main.TRUE,
3347 actual=consistentClustersResult,
3348 onpass="Clusters view is consistent across all ONOS nodes",
3349 onfail="ONOS nodes have different views of clusters" )
3350 if not consistentClustersResult:
3351 main.log.debug( clusters )
3352 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003353 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003354
3355 main.step( "There is only one SCC" )
3356 # there should always only be one cluster
3357 try:
3358 numClusters = len( json.loads( clusters[ 0 ] ) )
3359 except ( ValueError, TypeError ):
3360 main.log.exception( "Error parsing clusters[0]: " +
3361 repr( clusters[ 0 ] ) )
3362 numClusters = "ERROR"
3363 clusterResults = main.FALSE
3364 if numClusters == 1:
3365 clusterResults = main.TRUE
3366 utilities.assert_equals(
3367 expect=1,
3368 actual=numClusters,
3369 onpass="ONOS shows 1 SCC",
3370 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3371
3372 topoResult = ( devicesResults and linksResults
3373 and hostsResults and consistentHostsResult
3374 and consistentClustersResult and clusterResults
3375 and ipResult and hostAttachmentResults )
3376
3377 topoResult = topoResult and int( count <= 2 )
3378 note = "note it takes about " + str( int( cliTime ) ) + \
3379 " seconds for the test to make all the cli calls to fetch " +\
3380 "the topology from each ONOS instance"
3381 main.log.info(
3382 "Very crass estimate for topology discovery/convergence( " +
3383 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3384 str( count ) + " tries" )
3385
3386 main.step( "Device information is correct" )
3387 utilities.assert_equals(
3388 expect=main.TRUE,
3389 actual=devicesResults,
3390 onpass="Device information is correct",
3391 onfail="Device information is incorrect" )
3392
3393 main.step( "Links are correct" )
3394 utilities.assert_equals(
3395 expect=main.TRUE,
3396 actual=linksResults,
3397 onpass="Link are correct",
3398 onfail="Links are incorrect" )
3399
3400 main.step( "Hosts are correct" )
3401 utilities.assert_equals(
3402 expect=main.TRUE,
3403 actual=hostsResults,
3404 onpass="Hosts are correct",
3405 onfail="Hosts are incorrect" )
3406
3407 # FIXME: move this to an ONOS state case
3408 main.step( "Checking ONOS nodes" )
3409 nodeResults = utilities.retry( self.nodesCheck,
3410 False,
Jon Hallca319892017-06-15 15:25:22 -07003411 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07003412 attempts=5 )
3413 utilities.assert_equals( expect=True, actual=nodeResults,
3414 onpass="Nodes check successful",
3415 onfail="Nodes check NOT successful" )
3416 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003417 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003418 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003419 ctrl.name,
3420 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003421
3422 if not topoResult:
3423 main.cleanup()
3424 main.exit()
Jon Hallca319892017-06-15 15:25:22 -07003425
Devin Lim58046fa2017-07-05 16:55:00 -07003426 def linkDown( self, main, fromS="s3", toS="s28" ):
3427 """
3428 Link fromS-toS down
3429 """
3430 import time
3431 assert main.numCtrls, "main.numCtrls not defined"
3432 assert main, "main not defined"
3433 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003434 # NOTE: You should probably run a topology check after this
3435
3436 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3437
3438 description = "Turn off a link to ensure that Link Discovery " +\
3439 "is working properly"
3440 main.case( description )
3441
3442 main.step( "Kill Link between " + fromS + " and " + toS )
3443 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3444 main.log.info( "Waiting " + str( linkSleep ) +
3445 " seconds for link down to be discovered" )
3446 time.sleep( linkSleep )
3447 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3448 onpass="Link down successful",
3449 onfail="Failed to bring link down" )
3450 # TODO do some sort of check here
3451
3452 def linkUp( self, main, fromS="s3", toS="s28" ):
3453 """
3454 Link fromS-toS up
3455 """
3456 import time
3457 assert main.numCtrls, "main.numCtrls not defined"
3458 assert main, "main not defined"
3459 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003460 # NOTE: You should probably run a topology check after this
3461
3462 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3463
3464 description = "Restore a link to ensure that Link Discovery is " + \
3465 "working properly"
3466 main.case( description )
3467
3468 main.step( "Bring link between " + fromS + " and " + toS +" back up" )
3469 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3470 main.log.info( "Waiting " + str( linkSleep ) +
3471 " seconds for link up to be discovered" )
3472 time.sleep( linkSleep )
3473 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3474 onpass="Link up successful",
3475 onfail="Failed to bring link up" )
3476
3477 def switchDown( self, main ):
3478 """
3479 Switch Down
3480 """
3481 # NOTE: You should probably run a topology check after this
3482 import time
3483 assert main.numCtrls, "main.numCtrls not defined"
3484 assert main, "main not defined"
3485 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003486
3487 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3488
3489 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003490 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003491 main.case( description )
3492 switch = main.params[ 'kill' ][ 'switch' ]
3493 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3494
3495 # TODO: Make this switch parameterizable
3496 main.step( "Kill " + switch )
3497 main.log.info( "Deleting " + switch )
3498 main.Mininet1.delSwitch( switch )
3499 main.log.info( "Waiting " + str( switchSleep ) +
3500 " seconds for switch down to be discovered" )
3501 time.sleep( switchSleep )
3502 device = onosCli.getDevice( dpid=switchDPID )
3503 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003504 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003505 result = main.FALSE
3506 if device and device[ 'available' ] is False:
3507 result = main.TRUE
3508 utilities.assert_equals( expect=main.TRUE, actual=result,
3509 onpass="Kill switch successful",
3510 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003511
Devin Lim58046fa2017-07-05 16:55:00 -07003512 def switchUp( self, main ):
3513 """
3514 Switch Up
3515 """
3516 # NOTE: You should probably run a topology check after this
3517 import time
3518 assert main.numCtrls, "main.numCtrls not defined"
3519 assert main, "main not defined"
3520 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003521
3522 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3523 switch = main.params[ 'kill' ][ 'switch' ]
3524 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3525 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003526 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003527 description = "Adding a switch to ensure it is discovered correctly"
3528 main.case( description )
3529
3530 main.step( "Add back " + switch )
3531 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3532 for peer in links:
3533 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003534 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003535 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3536 main.log.info( "Waiting " + str( switchSleep ) +
3537 " seconds for switch up to be discovered" )
3538 time.sleep( switchSleep )
3539 device = onosCli.getDevice( dpid=switchDPID )
3540 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003541 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003542 result = main.FALSE
3543 if device and device[ 'available' ]:
3544 result = main.TRUE
3545 utilities.assert_equals( expect=main.TRUE, actual=result,
3546 onpass="add switch successful",
3547 onfail="Failed to add switch?" )
3548
3549 def startElectionApp( self, main ):
3550 """
3551 start election app on all onos nodes
3552 """
3553 assert main.numCtrls, "main.numCtrls not defined"
3554 assert main, "main not defined"
3555 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003556
3557 main.case( "Start Leadership Election app" )
3558 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003559 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003560 appResult = onosCli.activateApp( "org.onosproject.election" )
3561 utilities.assert_equals(
3562 expect=main.TRUE,
3563 actual=appResult,
3564 onpass="Election app installed",
3565 onfail="Something went wrong with installing Leadership election" )
3566
3567 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003568 onosCli.electionTestRun()
3569 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003570 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003571 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003572 utilities.assert_equals(
3573 expect=True,
3574 actual=sameResult,
3575 onpass="All nodes see the same leaderboards",
3576 onfail="Inconsistent leaderboards" )
3577
3578 if sameResult:
3579 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003580 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003581 correctLeader = True
3582 else:
3583 correctLeader = False
3584 main.step( "First node was elected leader" )
3585 utilities.assert_equals(
3586 expect=True,
3587 actual=correctLeader,
3588 onpass="Correct leader was elected",
3589 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003590 main.Cluster.testLeader = leader
3591
Devin Lim58046fa2017-07-05 16:55:00 -07003592 def isElectionFunctional( self, main ):
3593 """
3594 Check that Leadership Election is still functional
3595 15.1 Run election on each node
3596 15.2 Check that each node has the same leaders and candidates
3597 15.3 Find current leader and withdraw
3598 15.4 Check that a new node was elected leader
3599 15.5 Check that that new leader was the candidate of old leader
3600 15.6 Run for election on old leader
3601 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3602 15.8 Make sure that the old leader was added to the candidate list
3603
3604 old and new variable prefixes refer to data from before vs after
3605 withdrawl and later before withdrawl vs after re-election
3606 """
3607 import time
3608 assert main.numCtrls, "main.numCtrls not defined"
3609 assert main, "main not defined"
3610 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003611
3612 description = "Check that Leadership Election is still functional"
3613 main.case( description )
3614 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3615
3616 oldLeaders = [] # list of lists of each nodes' candidates before
3617 newLeaders = [] # list of lists of each nodes' candidates after
3618 oldLeader = '' # the old leader from oldLeaders, None if not same
3619 newLeader = '' # the new leaders fron newLoeaders, None if not same
3620 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3621 expectNoLeader = False # True when there is only one leader
Jon Hallca319892017-06-15 15:25:22 -07003622 if len( main.Cluster.controllers ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003623 expectNoLeader = True
3624
3625 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003626 electionResult = all( [ i == main.TRUE for i in main.Cluster.command( "electionTestRun" ) ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003627 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003628 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003629 actual=electionResult,
3630 onpass="All nodes successfully ran for leadership",
3631 onfail="At least one node failed to run for leadership" )
3632
3633 if electionResult == main.FALSE:
3634 main.log.error(
3635 "Skipping Test Case because Election Test App isn't loaded" )
3636 main.skipCase()
3637
3638 main.step( "Check that each node shows the same leader and candidates" )
3639 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003640 activeCLIs = main.Cluster.active()
3641 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003642 if sameResult:
3643 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003644 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003645 else:
3646 oldLeader = None
3647 utilities.assert_equals(
3648 expect=True,
3649 actual=sameResult,
3650 onpass="Leaderboards are consistent for the election topic",
3651 onfail=failMessage )
3652
3653 main.step( "Find current leader and withdraw" )
3654 withdrawResult = main.TRUE
3655 # do some sanity checking on leader before using it
3656 if oldLeader is None:
3657 main.log.error( "Leadership isn't consistent." )
3658 withdrawResult = main.FALSE
3659 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003660 for ctrl in main.Cluster.active():
3661 if oldLeader == ctrl.ipAddress:
3662 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003663 break
3664 else: # FOR/ELSE statement
3665 main.log.error( "Leader election, could not find current leader" )
3666 if oldLeader:
3667 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3668 utilities.assert_equals(
3669 expect=main.TRUE,
3670 actual=withdrawResult,
3671 onpass="Node was withdrawn from election",
3672 onfail="Node was not withdrawn from election" )
3673
3674 main.step( "Check that a new node was elected leader" )
3675 failMessage = "Nodes have different leaders"
3676 # Get new leaders and candidates
3677 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3678 newLeader = None
3679 if newLeaderResult:
3680 if newLeaders[ 0 ][ 0 ] == 'none':
3681 main.log.error( "No leader was elected on at least 1 node" )
3682 if not expectNoLeader:
3683 newLeaderResult = False
3684 newLeader = newLeaders[ 0 ][ 0 ]
3685
3686 # Check that the new leader is not the older leader, which was withdrawn
3687 if newLeader == oldLeader:
3688 newLeaderResult = False
3689 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3690 " as the current leader" )
3691 utilities.assert_equals(
3692 expect=True,
3693 actual=newLeaderResult,
3694 onpass="Leadership election passed",
3695 onfail="Something went wrong with Leadership election" )
3696
3697 main.step( "Check that that new leader was the candidate of old leader" )
3698 # candidates[ 2 ] should become the top candidate after withdrawl
3699 correctCandidateResult = main.TRUE
3700 if expectNoLeader:
3701 if newLeader == 'none':
3702 main.log.info( "No leader expected. None found. Pass" )
3703 correctCandidateResult = main.TRUE
3704 else:
3705 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3706 correctCandidateResult = main.FALSE
3707 elif len( oldLeaders[ 0 ] ) >= 3:
3708 if newLeader == oldLeaders[ 0 ][ 2 ]:
3709 # correct leader was elected
3710 correctCandidateResult = main.TRUE
3711 else:
3712 correctCandidateResult = main.FALSE
3713 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3714 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3715 else:
3716 main.log.warn( "Could not determine who should be the correct leader" )
3717 main.log.debug( oldLeaders[ 0 ] )
3718 correctCandidateResult = main.FALSE
3719 utilities.assert_equals(
3720 expect=main.TRUE,
3721 actual=correctCandidateResult,
3722 onpass="Correct Candidate Elected",
3723 onfail="Incorrect Candidate Elected" )
3724
3725 main.step( "Run for election on old leader( just so everyone " +
3726 "is in the hat )" )
3727 if oldLeaderCLI is not None:
3728 runResult = oldLeaderCLI.electionTestRun()
3729 else:
3730 main.log.error( "No old leader to re-elect" )
3731 runResult = main.FALSE
3732 utilities.assert_equals(
3733 expect=main.TRUE,
3734 actual=runResult,
3735 onpass="App re-ran for election",
3736 onfail="App failed to run for election" )
3737
3738 main.step(
3739 "Check that oldLeader is a candidate, and leader if only 1 node" )
3740 # verify leader didn't just change
3741 # Get new leaders and candidates
3742 reRunLeaders = []
3743 time.sleep( 5 ) # Paremterize
3744 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3745
3746 # Check that the re-elected node is last on the candidate List
3747 if not reRunLeaders[ 0 ]:
3748 positionResult = main.FALSE
3749 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
3750 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3751 str( reRunLeaders[ 0 ] ) ) )
3752 positionResult = main.FALSE
3753 utilities.assert_equals(
3754 expect=True,
3755 actual=positionResult,
3756 onpass="Old leader successfully re-ran for election",
3757 onfail="Something went wrong with Leadership election after " +
3758 "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003759
Devin Lim58046fa2017-07-05 16:55:00 -07003760 def installDistributedPrimitiveApp( self, main ):
3761 """
3762 Install Distributed Primitives app
3763 """
3764 import time
3765 assert main.numCtrls, "main.numCtrls not defined"
3766 assert main, "main not defined"
3767 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003768
3769 # Variables for the distributed primitives tests
3770 main.pCounterName = "TestON-Partitions"
3771 main.pCounterValue = 0
3772 main.onosSet = set( [] )
3773 main.onosSetName = "TestON-set"
3774
3775 description = "Install Primitives app"
3776 main.case( description )
3777 main.step( "Install Primitives app" )
3778 appName = "org.onosproject.distributedprimitives"
Jon Hallca319892017-06-15 15:25:22 -07003779 appResults = main.Cluster.next().activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003780 utilities.assert_equals( expect=main.TRUE,
3781 actual=appResults,
3782 onpass="Primitives app activated",
3783 onfail="Primitives app not activated" )
3784 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003785 time.sleep( 5 ) # To allow all nodes to activate