| #!/usr/bin/env python |
| """ |
usage: atomix-gen-config [-h] [-s PARTITION_SIZE] [-n NUM_PARTITIONS]
                         node_ip [filename] [node_ip [node_ip ...]]

Generate the partitions json file given a list of IPs or from the $OCC*
environment variables.

positional arguments:
  node_ip               IP address of the node for which to generate the
                        configuration.
  filename              File to write output to. If none is provided, output
                        is written to stdout.
  node_ip               IP Address(es) of the node(s) in the cluster. If no
                        IPs are given, will use the $OCC* environment
                        variables. NOTE: these arguments are only processed
                        after the filename argument.

optional arguments:
  -h, --help            show this help message and exit
  -s PARTITION_SIZE, --partition-size PARTITION_SIZE
                        Number of nodes per partition. Note that partition
                        sizes smaller than 3 are not fault tolerant. Defaults
                        to 3.
  -n NUM_PARTITIONS, --num-partitions NUM_PARTITIONS
                        Number of partitions. Defaults to the number of nodes
                        in the cluster.
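
example (the addresses below are purely illustrative):
  atomix-gen-config 10.0.0.1 /tmp/atomix.json 10.0.0.1 10.0.0.2 10.0.0.3
writes /tmp/atomix.json for the node at 10.0.0.1, containing the 'cluster',
'managementGroup' and 'partitionGroups' sections built below.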
| """ |
| |
| from os import environ |
| import argparse |
| import re |
| import json |
| |
# Natural-sort helpers: split a name such as 'OCC10' into ['occ', 10, ''] so that
# numeric suffixes compare as integers rather than character by character.
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
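# For example (made-up names), sorted(['OCC10', 'OCC2'], key=alphanum_key)
# yields ['OCC2', 'OCC10'] instead of the lexicographic ['OCC10', 'OCC2'].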
| |
def get_vars_by_type(prefix):
    """Return the names of environment variables matching <prefix><number>, naturally sorted."""
    matched = []
    for var in environ:
        if re.match(r"{}[0-9]+".format(prefix), var):
            matched.append(var)
    return sorted(matched, key=alphanum_key)
| |
| |
def get_vars():
    """Prefer the $OCC* variables; fall back to $OC* when none are set."""
    names = get_vars_by_type('OCC')
    if len(names) == 0:
        names = get_vars_by_type('OC')
    return names
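# For example (made-up values): with OCC1=10.0.0.1 and OCC2=10.0.0.2 exported,
# get_vars() returns ['OCC1', 'OCC2'] and the $OC* fallback is never consulted.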
| |
| |
def get_local_node(node, ips=None):
    """Return the generated id 'atomix-<n>' for the given node, based on its position in the IP list."""
    if not ips:
        ips = [environ[v] for v in get_vars()]
    return 'atomix-{}'.format(ips.index(node) + 1)
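# For example (made-up addresses): get_local_node('10.0.0.2', ['10.0.0.1', '10.0.0.2'])
# returns 'atomix-2'; the id is derived purely from the node's position in the list.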
| |
| |
def get_nodes(ips=None, default_port=5679):
    """Build the bootstrap node list. Entries may be given as 'id:ip:port' or as a
    bare IP, in which case an 'atomix-<n>' id and the default port are assigned."""
    node = lambda node_id, ip, port: {'id': node_id, 'address': '{}:{}'.format(ip, port)}
    result = []
    if not ips:
        ips = [environ[v] for v in get_vars()]
    i = 1
    for ip_string in ips:
        address_tuple = ip_string.split(":")
        if len(address_tuple) == 3:
            node_id = address_tuple[0]
            ip = address_tuple[1]
            port = int(address_tuple[2])
        else:
            node_id = 'atomix-{}'.format(i)
            i += 1
            ip = ip_string
            port = default_port
        result.append(node(node_id, ip, port))
    return result
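# For example (made-up addresses):
#   get_nodes(['10.0.0.1', 'atomix-5:10.0.0.2:5680'])
# returns [{'id': 'atomix-1', 'address': '10.0.0.1:5679'},
#          {'id': 'atomix-5', 'address': '10.0.0.2:5680'}].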
| |
| |
def get_local_address(node, ips=None, default_port=5679):
    """Return 'ip:port' for the given node id or IP, or '<node>:<default_port>' if no
    'id:ip:port' entry matches."""
    if not ips:
        ips = [environ[v] for v in get_vars()]
    for ip_string in ips:
        address_tuple = ip_string.split(":")
        if len(address_tuple) == 3:
            node_id = address_tuple[0]
            ip = address_tuple[1]
            port = int(address_tuple[2])
            if node == node_id or node == ip:
                return '{}:{}'.format(ip, port)
    return '{}:{}'.format(node, default_port)
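# For example (made-up addresses):
#   get_local_address('10.0.0.2', ['atomix-1:10.0.0.1:5679', 'atomix-2:10.0.0.2:5680'])
# returns '10.0.0.2:5680'; a node that matches no 'id:ip:port' entry falls back to
# '<node>:5679'.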
| |
| if __name__ == '__main__': |
| parser = argparse.ArgumentParser( |
| description="Generate the partitions json file given a list of IPs or from the $OCC* environment variables.") |
| parser.add_argument( |
| '-s', '--partition-size', type=int, default=3, |
| help="Number of nodes per partition. Note that partition sizes smaller than 3 are not fault tolerant. Defaults to 3." ) |
| parser.add_argument( |
| '-n', '--num-partitions', type=int, |
| help="Number of partitions. Defaults to the number of nodes in the cluster." ) |
| # TODO: make filename and nodes independent. This will break backwards compatibility with existing usage. |
| parser.add_argument( |
| 'node', metavar='node_ip', type=str, help='IP address of the node for which to generate the configuration') |
| parser.add_argument( |
| 'filename', metavar='filename', type=str, nargs='?', |
| help='File to write output to. If none is provided, output is written to stdout.') |
| parser.add_argument( |
| 'nodes', metavar='node_ip', type=str, nargs='*', |
| help='IP Address(es) of the node(s) in the cluster. If no IPs are given, ' + |
        'will use the $OCC* environment variables. NOTE: these arguments' +
| ' are only processed after the filename argument.') |
| |
| args = parser.parse_args() |
| filename = args.filename |
| partition_size = args.partition_size |
| local_member_id = get_local_node(args.node) |
| local_member_address = get_local_address(args.node, args.nodes) |
| nodes = get_nodes(args.nodes) |
| num_partitions = args.num_partitions |
| if not num_partitions: |
| num_partitions = len(nodes) |
| |
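    # One single-partition Raft management group spanning every node, plus a 'raft'
    # partition group split into num_partitions partitions of partition_size members.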
| data = { |
| 'cluster': { |
| 'clusterId': 'onos', |
| 'node': { |
| 'id': local_member_id, |
| 'address': local_member_address |
| }, |
| 'discovery': { |
| 'type': 'bootstrap', |
| 'nodes': nodes |
| } |
| }, |
| 'managementGroup': { |
| 'type': 'raft', |
| 'partitions': 1, |
| 'partitionSize': len(nodes), |
| 'members': [node['id'] for node in nodes], |
| 'storage': { |
| 'level': 'mapped' |
| } |
| }, |
| 'partitionGroups': { |
| 'raft': { |
| 'type': 'raft', |
| 'partitions': num_partitions, |
| 'partitionSize': partition_size, |
| 'members': [node['id'] for node in nodes], |
| 'storage': { |
| 'level': 'mapped' |
| } |
| } |
| } |
| } |
| output = json.dumps(data, indent=4) |
| |
| if filename: |
| with open(filename, 'w') as f: |
| f.write(output) |
| else: |
        print(output)