#!/usr/bin/env python
#
# Copyright (c) 2013 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
#      http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
18import json
19import logging
20import sys
21import time
22import random
23import urllib2
24import re
25import urllib
26from optparse import OptionParser, OptionGroup
27
# Default controller host used when -c/--controller is not given.
DEFAULT_CONTROLLER_ID = 'localhost'

# Default number of samples per REST PUT and default seconds between samples.
DEFAULT_BATCH = 5
DEFAULT_SAMPLING_INTERVAL = 5

# Supported statistics types (legal values for the -m/--statsTypes option).
STATS_CPU = 'cpu'
STATS_MEM = 'mem'
STATS_SWAP = 'swap'
STATS_OF = 'of'
STATS_LOG = 'log'

STATS = [STATS_CPU, STATS_MEM, STATS_SWAP, STATS_OF, STATS_LOG]

# Millisecond time constants.  NOTE(review): BASE_TIME and the *_CONVERT
# constants are not referenced anywhere in this file — possibly leftovers.
BASE_TIME = 1297278987000
SECONDS_CONVERT = 1000
MINUTES_CONVERT = 60 * SECONDS_CONVERT
HOURS_CONVERT = 60 * MINUTES_CONVERT
DAYS_CONVERT = 24 * HOURS_CONVERT

# Module-level settings, populated by processOptions() from the command line.
numSamples = 0           # total number of samples to generate
samplingInterval = 0     # seconds between consecutive samples
batchSize = 0            # samples per REST PUT
statsTypes = []          # subset of STATS to generate
numSwitches = 0          # number of switches for OF stats
switchIDs = []           # DPID strings derived from numSwitches
seed = 0                 # randomizer seed (recorded; not applied to random)
controller = ''          # controller IP/hostname for the REST calls
logger = 0               # logging.Logger, set in __main__ via initLogger()
debugLevel = logging.INFO    # log level for the file handler
logfile = 'filler.log'       # path of the log file
58
def initLogger():
    """
    Create and return the "filler" logger.

    The logger writes every record at or above the module-level debugLevel
    to the module-level logfile, and echoes WARNING-and-above records to
    the console.
    """
    filler_logger = logging.getLogger("filler")
    filler_logger.setLevel(debugLevel)

    fmt = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s %(message)s")

    # File handler: gets everything the logger lets through.
    file_handler = logging.FileHandler(logfile)
    file_handler.setFormatter(fmt)
    filler_logger.addHandler(file_handler)

    # Console handler: warnings and worse only.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)
    console_handler.setLevel(logging.WARNING)
    filler_logger.addHandler(console_handler)

    return filler_logger
77
def intToDpid(intValue):
    """
    Convert an integer into an OpenFlow DPID string.

    The result is 16 lowercase hex digits grouped in colon-separated byte
    pairs, e.g. intToDpid(1) -> '00:00:00:00:00:00:00:01'.

    Values larger than a DPID can hold saturate to ff:ff:...:ff.
    (Bug fix: the original clamped at 2**128-1, but a DPID is only 16 hex
    digits = 64 bits, so anything in (2**64-1, 2**128-1] silently wrapped.)
    """
    if intValue > 2**64 - 1:
        intValue = 2**64 - 1
    # Zero-padded 16-digit lowercase hex, then insert ':' between bytes.
    hex_digits = '%016x' % intValue
    return ':'.join(hex_digits[i:i + 2] for i in range(0, 16, 2))
91
def getRandomInc():
    """Return a pseudo-random integer in [1, 100]."""
    return random.randint(1, 100)
97
def getRandomInt(max):
    """
    Return a pseudo-random integer in [1, max].

    Any max below 2 (including zero and negatives) yields 1.
    """
    return 1 if max <= 1 else random.randint(1, max)
106
def getRandomPercentage(max):
    """
    Return a pseudo-random percentage with two decimals in (0, max].

    Values of max below 0.02 always yield 0.01.
    """
    hundredths = int(max * 100)
    if hundredths < 2:
        return 0.01
    try:
        return float(random.randint(1, hundredths)) / 100
    except ValueError as err:
        logger.error ("error: %s, percentMax=%d"%(err, hundredths))
        return 0.01
121
class StatsFiller():
    """
    Generate synthetic controller/switch statistics and log events and
    upload them in batches to a controller's REST stats/events API.

    Payload layout (mirrors what initStatsData builds):
      statsData['controller-stats'][hostID][<metric>] -> [{'timestamp', 'value'}, ...]
      statsData['switch-stats'][dpid][<counter>]      -> [{'timestamp', 'value'}, ...]
      logData[hostID]                                 -> [event dict, ...]
    """

    def __init__(self, numSamples, samplingInterval, batchSize, switchIDs, statsTypes, hostID, cluster, components, seed, logger):
        """
        numSamples       -- total number of samples to generate
        samplingInterval -- seconds between consecutive samples
        batchSize        -- samples per REST PUT
        switchIDs        -- list of DPID strings for OF stats
        statsTypes       -- subset of the supported stats type names
        hostID           -- controller host the data is attributed and sent to
        cluster          -- cluster ID used in the REST URLs
        components       -- component names for generated log events
        seed             -- randomizer seed (recorded; not applied here)
        logger           -- logging.Logger for progress/error reporting
        """
        # Bug fix: these were mutable class-level attributes, shared by
        # every StatsFiller instance; make them per-instance.
        self.statsData = {}
        self.logData = {}
        self.of_packetin = 0
        self.of_flowmod = 0
        self.of_activeflow = 0
        self.numSamples = numSamples
        self.samplingInterval = samplingInterval
        self.batchSize = batchSize
        self.switchIDs = switchIDs
        self.statsTypes = statsTypes
        # Bug fix: the original stored hostID only as self.controllerID, so
        # self.hostID silently stayed at the old class default 'localhost'
        # no matter which controller was requested.  Keep both in sync.
        self.controllerID = hostID
        self.hostID = hostID
        self.cluster = cluster
        self.components = components
        self.seed = seed
        self.logger = logger

    def repr(self):
        """Debug helper: the accumulated stats payload as a string."""
        return str(self.statsData)

    def initStatsData(self):
        """(Re)initialize the sample buffers for every enabled stats type."""
        if STATS_CPU in self.statsTypes or STATS_MEM in self.statsTypes or STATS_SWAP in self.statsTypes:
            self.statsData['controller-stats'] = {}
            self.statsData['controller-stats'][self.hostID] = {}

            if STATS_CPU in self.statsTypes:
                self.statsData['controller-stats'][self.hostID]['cpu-idle'] = []
                self.statsData['controller-stats'][self.hostID]['cpu-nice'] = []
                self.statsData['controller-stats'][self.hostID]['cpu-user'] = []
                self.statsData['controller-stats'][self.hostID]['cpu-system'] = []
            if STATS_MEM in self.statsTypes:
                self.statsData['controller-stats'][self.hostID]['mem-used'] = []
                self.statsData['controller-stats'][self.hostID]['mem-free'] = []
            if STATS_SWAP in self.statsTypes:
                self.statsData['controller-stats'][self.hostID]['swap-used'] = []

        if STATS_OF in self.statsTypes:
            self.statsData['switch-stats'] = {}
            # Bug fix: iterate the instance's own switch list instead of the
            # module-level 'switchIDs' global (the two duplicate STATS_OF
            # branches are also merged into one loop).
            for dpid in self.switchIDs:
                self.statsData['switch-stats'][dpid] = {
                    'OFPacketIn': [],
                    'OFFlowMod': [],
                    'OFActiveFlow': [],
                }

        if STATS_LOG in self.statsTypes:
            self.logData[self.hostID] = []

    def generate_a_sw_stat(self, timestamp, dpid, statsTypes, value):
        """Append one {'timestamp', 'value'} sample for a switch counter."""
        sample = {'timestamp':timestamp, 'value':value}
        self.statsData['switch-stats'][dpid][statsTypes].append(sample)

    def generate_a_controller_stat(self, timestamp, statsTypes, value):
        """Append one {'timestamp', 'value'} sample for a controller metric."""
        sample = {'timestamp':timestamp, 'value':value}
        self.statsData['controller-stats'][self.hostID][statsTypes].append(sample)

    def generate_log_event(self, timestamp, component, log_level, message):
        """Append one synthetic log event for this host."""
        event = {'timestamp':timestamp, 'component':component, 'log-level':log_level,'message':message}
        self.logData[self.hostID].append(event)

    def generate_a_batch(self, startTime, batchSize):
        """Generate batchSize samples starting at startTime (seconds)."""
        for i in range(batchSize):
            # Sample timestamp in milliseconds.
            ts = int(startTime + i * self.samplingInterval)*1000
            # controller stats
            if STATS_CPU in self.statsTypes:
                # Split 100% between idle/nice/user/system so the four
                # shares never exceed the whole.
                remaining = 100.00
                v = getRandomPercentage(remaining)
                self.generate_a_controller_stat(ts, 'cpu-idle', round(v, 2))
                remaining -= v
                v = getRandomPercentage(remaining)
                self.generate_a_controller_stat(ts, 'cpu-nice', round(v, 2))
                remaining -= v
                v = getRandomPercentage(remaining)
                self.generate_a_controller_stat(ts, 'cpu-user', round(v, 2))
                remaining -= v
                # Bug fix: report the leftover share as cpu-system; the
                # original re-reported the cpu-user value here (compare the
                # used/free pattern in the mem branch below).
                self.generate_a_controller_stat(ts, 'cpu-system', round(remaining, 2))
            if STATS_MEM in self.statsTypes:
                total = getRandomInt(1000000000)
                used = getRandomInt(total)
                self.generate_a_controller_stat(ts, 'mem-used', used)
                self.generate_a_controller_stat(ts, 'mem-free', total - used)
            if STATS_SWAP in self.statsTypes:
                total = getRandomInt(1000000000)
                self.generate_a_controller_stat(ts, 'swap-used', getRandomInt(total))

            # switch stats
            if STATS_OF in self.statsTypes:
                for dpid in self.switchIDs:
                    self.of_packetin = getRandomInt(100)
                    self.generate_a_sw_stat(ts, dpid, 'OFPacketIn', self.of_packetin)
                    self.of_flowmod = getRandomInt(100)
                    self.generate_a_sw_stat(ts, dpid, 'OFFlowMod', self.of_flowmod)
                    self.of_activeflow = getRandomInt(100)
                    self.generate_a_sw_stat(ts, dpid, 'OFActiveFlow', self.of_activeflow)

            if STATS_LOG in self.statsTypes:
                # Bug fix: use self.components instead of the module-level
                # 'components' global.
                for component in self.components:
                    self.generate_log_event(ts, component, 'Error', 'Another log message')

    def constructRestRrls(self):
        """
        Construct the REST URLs for the stats and log-event uploads.
        (Method name kept as-is, typo and all, for backward compatibility.)
        """
        self.statsUrl = 'http://%s:8000/rest/v1/stats/data/%s'%(self.controllerID, self.cluster)
        self.logUrl = 'http://%s:8000/rest/v1/events/data/%s'%(self.controllerID, self.cluster)

    def printRestErrorInfo(self, e):
        """
        Extract the error information from an HTTPError response and log it,
        including the optional model/field validation details.
        """
        error_code = e.getcode()
        response_text = e.read()
        try:
            # Bug fix (applies throughout the class): log through the
            # injected self.logger instead of the module-level 'logger'.
            self.logger.error('HTTP error: code = %d, %s'%(error_code, response_text))

            obj = json.loads(response_text)
            error_type = obj.get('error_type')
            description = obj.get('description')

            self.logger.error('HTTP error code = %d; error_type = %s; description = %s'%(error_code, str(error_type), description))

            # Optional validation error details.
            model_error = obj.get('model_error')
            if model_error:
                self.logger.error('model_error = %s'%str(model_error))
            field_errors = obj.get('field_errors')
            if field_errors:
                self.logger.error('field_errors = %s'%str(field_errors))
        except ValueError as parse_err:
            # Bug fix: don't shadow the HTTPError parameter 'e' while
            # handling a non-JSON response body.
            self.logger.error(parse_err)

    def putRestData(self, url, obj):
        """
        PUT the JSON-encoded payload 'obj' to 'url'.

        Returns the response body, or None on HTTP error (which is logged).
        """
        self.logger.debug("URL: %s"%url)
        self.logger.debug("Sending: %s"%obj)
        request = urllib2.Request(url, obj, {'Content-Type':'application/json'})
        # urllib2 has no native PUT support; override the method.
        request.get_method = lambda: 'PUT'
        try:
            response = urllib2.urlopen(request)
            ret = response.read()
            self.logger.debug("Got response: %s"%str(ret))
            return ret
        except urllib2.HTTPError as e:
            self.logger.error("Got Exception: %s"%str(e))
            self.printRestErrorInfo(e)

    def postData(self):
        """PUT any accumulated stats/log payloads to the controller."""
        self.constructRestRrls()

        if self.statsData:
            output = json.JSONEncoder().encode(self.statsData)
            retMsg = self.putRestData(self.statsUrl, output)
            self.logger.info("Put rest call for stats data returns: %s"%retMsg)
        if self.logData:
            output = json.JSONEncoder().encode(self.logData)
            retMsg = self.putRestData(self.logUrl, output)
            self.logger.info("Put rest call for log data returns: %s"%retMsg)

    def fill(self):
        """
        Generate numSamples samples ending at the current time and upload
        them batch by batch, printing a percent-complete progress line.
        """
        endTime = time.time()
        startTime = endTime - self.numSamples * self.samplingInterval
        remainingSamples = self.numSamples
        while remainingSamples > 0:
            self.logger.info("starttime = %s(%d), endtime = %s(%d)"%(time.ctime(startTime),startTime,time.ctime(endTime),endTime))
            self.initStatsData()
            # Last batch may be smaller than the configured batch size.
            batchSize = min(remainingSamples, self.batchSize)
            remainingSamples -= batchSize
            self.generate_a_batch(startTime, batchSize)
            startTime += self.samplingInterval * batchSize
            self.postData()
            sys.stdout.write("%0.2f%%\r"%(float(self.numSamples-remainingSamples)*100/self.numSamples))
326
def parseLogLevel(level):
    """
    Map a (possibly abbreviated) level name to its logging constant.

    Any unambiguous prefix works ('d', 'warn', 'crit', ...); an
    unrecognized name yields None.
    """
    known_levels = (
        ('debug', logging.DEBUG),
        ('info', logging.INFO),
        ('warning', logging.WARNING),
        ('error', logging.ERROR),
        ('critical', logging.CRITICAL),
    )
    for name, constant in known_levels:
        if name.startswith(level):
            return constant
    return None
340
def processOptions(options):
    """
    Process the command line arguments.

    Validates the parsed optparse 'options' and stores the results in the
    module-level globals declared below.  Raises ValueError for any invalid
    or missing combination of options.
    """

    global numSamples
    global samplingInterval
    global batchSize
    global statsTypes
    global numSwitches
    global switchIDs
    global seed
    global controller
    global cluster
    global components
    global debugLevel
    global logfile

    if options.numSamples:
        numSamples = options.numSamples

    # NOTE: if both -n and -p are given, the period silently wins; there is
    # no mutual-exclusion check despite the help text saying "Can NOT be
    # used with" the other option.
    if options.period:
        # Split the period into a number and an optional unit suffix,
        # e.g. '15m' -> ('15', 'm').
        m = re.search("([0-9]*)([A-Za-z]*)$", options.period)
        (value, unit) = m.groups()
        if value:
            value = int(value)
        if unit:
            # Any prefix of the unit name is accepted (m, min, minutes, ...).
            if 'minutes'.startswith(unit):
                value = 60*value
            elif 'hours'.startswith(unit):
                value = 60*60*value
            elif 'days'.startswith(unit):
                value = 24*60*60*value
            elif not 'seconds'.startswith(unit):
                raise ValueError("Invalid period: %s"%options.period)
        numSamples = value

    if options.sampleInterval:
        samplingInterval = options.sampleInterval
    else:
        samplingInterval = DEFAULT_SAMPLING_INTERVAL

    # Convert a duration in seconds into a sample count.
    # NOTE(review): this is integer division on Python 2 (the script uses
    # urllib2, so it targets Python 2); on Python 3 it would yield a float.
    numSamples /= samplingInterval

    if options.batchSize:
        batchSize = options.batchSize
    else:
        batchSize = DEFAULT_BATCH

    if options.numSwitches:
        numSwitches = options.numSwitches

    if options.statsTypes:
        statsTypes = options.statsTypes.split(',')
        for stat in statsTypes:
            if stat not in STATS:
                raise ValueError("Invalid stat: %s"%stat)

    # NOTE(review): the seed is stored but never passed to random.seed(),
    # so the "-r" reproducibility option appears to have no effect here.
    if options.seed:
        seed = options.seed
    else:
        seed = random.random()

    if options.controller:
        controller = options.controller
    else:
        controller = 'localhost'

    if options.cluster:
        cluster = options.cluster
    else:
        cluster = 'default'

    components = options.components.split(',')

    if options.debugLevel:
        debugLevel = parseLogLevel(options.debugLevel)
    else:
        debugLevel = logging.INFO

    # parseLogLevel returns None for an unrecognized level name.
    if not debugLevel:
        raise ValueError("Incorrect debug level, %s."%options.debugLevel)

    if options.logfile:
        logfile = options.logfile
    else:
        logfile = 'filler.log'


    if len(statsTypes) == 0:
        raise ValueError("statsTypes is required.")

    # OF stats need a switch population; derive sequential DPIDs from it.
    if STATS_OF in statsTypes:
        if numSwitches == 0:
            raise ValueError("numSwitches must be nonzero to generate of stats.")
        else:
            for i in range(numSwitches):
                switchIDs.append(intToDpid(i))

    if numSamples == 0:
        raise ValueError("numSamples or period is required")
442
443
444
if __name__ == '__main__':
    # Build the command-line interface.  NOTE(review): type="long" is a
    # Python-2-only optparse type, consistent with the urllib2 usage above.
    parser = OptionParser()
    group = OptionGroup(parser, "Commonly Used Options")
    group.add_option("-n", "--numSamples", dest="numSamples", type="long", help="Number of samples to be generated. Can NOT be used with timePeriod option.")
    group.add_option("-p", "--timePeriod", dest="period", help="The time period to fill the stats data. "
                     "The format can be in seconds, minutes, hours, or days. e.g. 100s(econds), 15m(inutes), 2h(ours), 3d(ays). "
                     "Can NOT be used with numSamples option.")
    group.add_option("-t", "--samplingInterval", dest="sampleInterval", type = "int", help="The sampling interval in seconds")
    group.add_option("-b", "--batchSize", dest="batchSize", type = "int", help="The number of samples for each rest put")
    group.add_option("-s", "--numSwitches", dest="numSwitches", type = "int", help="The number of switches for OF stats. The dpids start with "
                     "00:00:00:00:00:00:00:01 and increment to the number of switches.")
    group.add_option("-m", "--statsTypes", dest="statsTypes", help="A comma separated statsTypes, Options are cpu, mem, swap, of, and log."
                     " e.g. cpu,mem")
    group.add_option("-c", "--controller", dest="controller", help="The IP address of the controller")
    group.add_option("-u", "--cluster", dest="cluster", help="cluster ID")
    group.add_option("-z", "--components", dest="components", default="sdnplatform,cassandra", help="A comma-separated list of component names for log events")
    parser.add_option_group(group)

    lc_group = OptionGroup(parser, "Less Commonly Used Options")
    lc_group.add_option("-r", "--seed", dest="seed", type = "int", help="Same data can be recreated by setting the same seed for the randomizer")
    lc_group.add_option("-d", "--debugLevel", dest="debugLevel", help="Set the log level for logging: debug, info, warning, critical, error")
    lc_group.add_option("-f", "--logfile", dest="logfile", help="The logfile that keeps the logs. Default is filler.log")
    parser.add_option_group(lc_group)

    # This tool takes options only; any positional argument is an error.
    (options, args) = parser.parse_args()
    if len(args) != 0:
        parser.error("incorrect number of arguments: %s"%args)


    # Validate the options (filling the module globals), then set up logging.
    try:
        processOptions(options)
        logger = initLogger()
        logger.debug("numSample:%d, samplingInterval:%d, batchSize=%d, statsTypes=%s, numSwitches=%d switchIDs=%s seed=%f cluster=%s components=%s"%
                     (numSamples, samplingInterval, batchSize, statsTypes, numSwitches, switchIDs, seed, cluster, components))
    except ValueError as e:
        print("Error: %s"%e)
        sys.exit()

    # Generate the samples and push them to the controller.
    filler = StatsFiller(numSamples, samplingInterval, batchSize, switchIDs, statsTypes, controller, cluster, components, seed, logger)
    filler.fill()