Merge "Remove some of the Titan artifacts."
diff --git a/conf/README b/conf/README
index f43d67b..07f051f 100644
--- a/conf/README
+++ b/conf/README
@@ -1 +1 @@
-Please use onos-embedded.properties to start onos with embedded cassandra.
+<TODO: Add description about config files here>
diff --git a/conf/cassandra-repair.sh b/conf/cassandra-repair.sh
deleted file mode 100755
index 2c2638e..0000000
--- a/conf/cassandra-repair.sh
+++ /dev/null
@@ -1 +0,0 @@
-<cassandra_dir>/bin/nodetool repair
diff --git a/conf/cassandra.titan b/conf/cassandra.titan
deleted file mode 100644
index 9a9b00f..0000000
--- a/conf/cassandra.titan
+++ /dev/null
@@ -1,7 +0,0 @@
-storage.backend=cassandrathrift
-storage.hostname=localhost
-storage.keyspace=onos
-storage.connection-pool-size=4096
-storage.replication-factor=1
-storage.write-consistency-level=ALL
-storage.read-consistency-level=ONE
diff --git a/conf/cassandra.titan.nothrift b/conf/cassandra.titan.nothrift
deleted file mode 100644
index 04f9fd7..0000000
--- a/conf/cassandra.titan.nothrift
+++ /dev/null
@@ -1,7 +0,0 @@
-storage.backend=cassandra
-storage.hostname=localhost
-storage.keyspace=onos
-storage.connection-pool-size=4096
-storage.replication-factor=3
-storage.write-consistency-level=ALL
-storage.read-consistency-level=ONE
diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml
deleted file mode 100644
index 277f03a..0000000
--- a/conf/cassandra.yaml
+++ /dev/null
@@ -1,643 +0,0 @@
-# Cassandra storage config YAML
-
-# NOTE:
-#   See http://wiki.apache.org/cassandra/StorageConfiguration for
-#   full explanations of configuration directives
-# /NOTE
-
-# The name of the cluster. This is mainly used to prevent machines in
-# one logical cluster from joining another.
-cluster_name: 'Test Cluster'
-
-# This defines the number of tokens randomly assigned to this node on the ring
-# The more tokens, relative to other nodes, the larger the proportion of data
-# that this node will store. You probably want all nodes to have the same number
-# of tokens assuming they have equal hardware capability.
-#
-# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
-# and will use the initial_token as described below.
-#
-# Specifying initial_token will override this setting.
-#
-# If you already have a cluster with 1 token per node, and wish to migrate to
-# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
-# num_tokens: 256
-
-# If you haven't specified num_tokens, or have set it to the default of 1 then
-# you should always specify InitialToken when setting up a production
-# cluster for the first time, and often when adding capacity later.
-# The principle is that each node should be given an equal slice of
-# the token ring; see http://wiki.apache.org/cassandra/Operations
-# for more details.
-#
-# If blank, Cassandra will request a token bisecting the range of
-# the heaviest-loaded existing node.  If there is no load information
-# available, such as is the case with a new cluster, it will pick
-# a random token, which will lead to hot spots.
-initial_token:
-
-# See http://wiki.apache.org/cassandra/HintedHandoff
-hinted_handoff_enabled: true
-# this defines the maximum amount of time a dead host will have hints
-# generated.  After it has been dead this long, hints will be dropped.
-max_hint_window_in_ms: 10800000 # 3 hours
-# throttle in KB's per second, per delivery thread
-hinted_handoff_throttle_in_kb: 1024
-# Number of threads with which to deliver hints;
-# Consider increasing this number when you have multi-dc deployments, since
-# cross-dc handoff tends to be slower
-max_hints_delivery_threads: 2
-
-# The following setting populates the page cache on memtable flush and compaction
-# WARNING: Enable this setting only when the whole node's data fits in memory.
-# Defaults to: false
-# populate_io_cache_on_flush: false
-
-# authentication backend, implementing IAuthenticator; used to identify users
-authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
-
-# authorization backend, implementing IAuthorizer; used to limit access/provide permissions
-authorizer: org.apache.cassandra.auth.AllowAllAuthorizer
-
-# The partitioner is responsible for distributing rows (by key) across
-# nodes in the cluster.  Any IPartitioner may be used, including your
-# own as long as it is on the classpath.  Out of the box, Cassandra
-# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
-# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
-# 
-# - RandomPartitioner distributes rows across the cluster evenly by md5.
-#   This is the default prior to 1.2 and is retained for compatibility.
-# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128
-#   Hash Function instead of md5.  When in doubt, this is the best option.
-# - ByteOrderedPartitioner orders rows lexically by key bytes.  BOP allows
-#   scanning rows in key order, but the ordering can generate hot spots
-#   for sequential insertion workloads.
-# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
-# - keys in a less-efficient format and only works with keys that are
-#   UTF8-encoded Strings.
-# - CollatingOPP colates according to EN,US rules rather than lexical byte
-#   ordering.  Use this as an example if you need custom collation.
-#
-# See http://wiki.apache.org/cassandra/Operations for more on
-# partitioners and token selection.
-partitioner: org.apache.cassandra.dht.RandomPartitioner
-
-# directories where Cassandra should store data on disk.
-data_file_directories:
-    - /tmp/cassandra/data
-
-# commit log
-commitlog_directory: /tmp/cassandra/commitlog
-
-# policy for data disk failures:
-# stop: shut down gossip and Thrift, leaving the node effectively dead, but
-#       still inspectable via JMX.
-# best_effort: stop using the failed disk and respond to requests based on
-#              remaining available sstables.  This means you WILL see obsolete
-#              data at CL.ONE!
-# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-disk_failure_policy: stop
-
-# Maximum size of the key cache in memory.
-#
-# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
-# minimum, sometimes more. The key cache is fairly tiny for the amount of
-# time it saves, so it's worthwhile to use it at large numbers.
-# The row cache saves even more time, but must store the whole values of
-# its rows, so it is extremely space-intensive. It's best to only use the
-# row cache if you have hot rows or static rows.
-#
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# safe the keys cache. Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 14400 or 4 hours.
-key_cache_save_period: 14400
-
-# Number of keys from the key cache to save
-# Disabled by default, meaning all keys are going to be saved
-# key_cache_keys_to_save: 100
-
-# Maximum size of the row cache in memory.
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
-
-# Duration in seconds after which Cassandra should
-# safe the row cache. Caches are saved to saved_caches_directory as specified
-# in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
-
-# Number of keys from the row cache to save
-# Disabled by default, meaning all keys are going to be saved
-# row_cache_keys_to_save: 100
-
-# The provider for the row cache to use.
-#
-# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
-#
-# SerializingCacheProvider serialises the contents of the row and stores
-# it in native memory, i.e., off the JVM Heap. Serialized rows take
-# significantly less memory than "live" rows in the JVM, so you can cache
-# more rows in a given memory footprint.  And storing the cache off-heap
-# means you can use smaller heap sizes, reducing the impact of GC pauses.
-#
-# It is also valid to specify the fully-qualified class name to a class
-# that implements org.apache.cassandra.cache.IRowCacheProvider.
-#
-# Defaults to SerializingCacheProvider
-row_cache_provider: SerializingCacheProvider
-
-# saved caches
-saved_caches_directory: /tmp/cassandra/saved_caches
-
-# commitlog_sync may be either "periodic" or "batch." 
-# When in batch mode, Cassandra won't ack writes until the commit log
-# has been fsynced to disk.  It will wait up to
-# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
-# performing the sync.
-#
-# commitlog_sync: batch
-# commitlog_sync_batch_window_in_ms: 50
-#
-# the other option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
-# milliseconds.
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-
-# The size of the individual commitlog file segments.  A commitlog
-# segment may be archived, deleted, or recycled once all the data
-# in it (potentally from each columnfamily in the system) has been 
-# flushed to sstables.  
-#
-# The default size is 32, which is almost always fine, but if you are
-# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 8 or 16 MB
-# is reasonable.
-commitlog_segment_size_in_mb: 32
-
-# any class that implements the SeedProvider interface and has a
-# constructor that takes a Map<String, String> of parameters will do.
-seed_provider:
-    # Addresses of hosts that are deemed contact points. 
-    # Cassandra nodes use this list of hosts to find each other and learn
-    # the topology of the ring.  You must change this if you are running
-    # multiple nodes!
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          # seeds is actually a comma-delimited list of addresses.
-          # Ex: "<ip1>,<ip2>,<ip3>"
-          - seeds: "127.0.0.1"
-
-# emergency pressure valve: each time heap usage after a full (CMS)
-# garbage collection is above this fraction of the max, Cassandra will
-# flush the largest memtables.  
-#
-# Set to 1.0 to disable.  Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-#
-# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
-# it is most effective under light to moderate load, or read-heavy
-# workloads; under truly massive write load, it will often be too
-# little, too late.
-flush_largest_memtables_at: 0.75
-
-# emergency pressure valve #2: the first time heap usage after a full
-# (CMS) garbage collection is above this fraction of the max,
-# Cassandra will reduce cache maximum _capacity_ to the given fraction
-# of the current _size_.  Should usually be set substantially above
-# flush_largest_memtables_at, since that will have less long-term
-# impact on the system.  
-# 
-# Set to 1.0 to disable.  Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-reduce_cache_sizes_at: 0.85
-reduce_cache_capacity_to: 0.6
-
-# For workloads with more data than can fit in memory, Cassandra's
-# bottleneck will be reads that need to fetch data from
-# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
-# order to allow the operations to enqueue low enough in the stack
-# that the OS and drives can reorder them.
-#
-# On the other hand, since writes are almost never IO bound, the ideal
-# number of "concurrent_writes" is dependent on the number of cores in
-# your system; (8 * number_of_cores) is a good rule of thumb.
-concurrent_reads: 32
-concurrent_writes: 32
-
-# Total memory to use for memtables.  Cassandra will flush the largest
-# memtable when this much memory is used.
-# If omitted, Cassandra will set it to 1/3 of the heap.
-# memtable_total_space_in_mb: 2048
-
-# Total space to use for commitlogs.  Since commitlog segments are
-# mmapped, and hence use up address space, the default size is 32
-# on 32-bit JVMs, and 1024 on 64-bit JVMs.
-#
-# If space gets above this value (it will round up to the next nearest
-# segment multiple), Cassandra will flush every dirty CF in the oldest
-# segment and remove it.  So a small total commitlog space will tend
-# to cause more flush activity on less-active columnfamilies.
-# commitlog_total_space_in_mb: 4096
-
-# This sets the amount of memtable flush writer threads.  These will
-# be blocked by disk io, and each one will hold a memtable in memory
-# while blocked. If you have a large heap and many data directories,
-# you can increase this value for better flush performance.
-# By default this will be set to the amount of data directories defined.
-#memtable_flush_writers: 1
-
-# the number of full memtables to allow pending flush, that is,
-# waiting for a writer thread.  At a minimum, this should be set to
-# the maximum number of secondary indexes created on a single CF.
-memtable_flush_queue_size: 4
-
-# Whether to, when doing sequential writing, fsync() at intervals in
-# order to force the operating system to flush the dirty
-# buffers. Enable this to avoid sudden dirty buffer flushing from
-# impacting read latencies. Almost always a good idea on SSD:s; not
-# necessarily on platters.
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-
-# TCP port, for commands and data
-storage_port: 7000
-
-# SSL port, for encrypted communication.  Unused unless enabled in
-# encryption_options
-ssl_storage_port: 7001
-
-# Address to bind to and tell other Cassandra nodes to connect to. You
-# _must_ change this if you want multiple nodes to be able to
-# communicate!
-# 
-# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
-# will always do the Right Thing *if* the node is properly configured
-# (hostname, name resolution, etc), and the Right Thing is to use the
-# address associated with the hostname (it might not be).
-#
-# Setting this to 0.0.0.0 is always wrong.
-listen_address: localhost
-
-# Address to broadcast to other Cassandra nodes
-# Leaving this blank will set it to the same value as listen_address
-# broadcast_address: 1.2.3.4
-
-
-# Whether to start the native transport server.
-# Currently, only the thrift server is started by default because the native
-# transport is considered beta.
-# Please note that the address on which the native transport is bound is the
-# same as the rpc_address. The port however is different and specified below.
-start_native_transport: false
-# port for the CQL native transport to listen for clients on
-native_transport_port: 9042
-# The minimum and maximum threads for handling requests when the native
-# transport is used. The meaning is those is similar to the one of
-# rpc_min_threads and rpc_max_threads, though the default differ slightly and
-# are the ones below:
-# native_transport_min_threads: 16
-# native_transport_max_threads: 128
-
-
-# Whether to start the thrift rpc server.
-start_rpc: true
-# The address to bind the Thrift RPC service to -- clients connect
-# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
-# you want Thrift to listen on all interfaces.
-# 
-# Leaving this blank has the same effect it does for ListenAddress,
-# (i.e. it will be based on the configured hostname of the node).
-rpc_address: localhost
-# port for Thrift to listen for clients on
-rpc_port: 9160
-
-# enable or disable keepalive on rpc connections
-rpc_keepalive: true
-
-# Cassandra provides three out-of-the-box options for the RPC Server:
-#
-# sync  -> One thread per thrift connection. For a very large number of clients, memory
-#          will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
-#          per thread, and that will correspond to your use of virtual memory (but physical memory
-#          may be limited depending on use of stack space).
-#
-# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
-#          asynchronously using a small number of threads that does not vary with the amount
-#          of thrift clients (and thus scales well to many clients). The rpc requests are still
-#          synchronous (one thread per active request).
-#
-# The default is sync because on Windows hsha is about 30% slower.  On Linux,
-# sync/hsha performance is about the same, with hsha of course using less memory.
-#
-# Alternatively,  can provide your own RPC server by providing the fully-qualified class name
-# of an o.a.c.t.TServerFactory that can create an instance of it.
-rpc_server_type: sync
-
-# Uncomment rpc_min|max_thread to set request pool size limits.
-#
-# Regardless of your choice of RPC server (see above), the number of maximum requests in the
-# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
-# RPC server, it also dictates the number of clients that can be connected at all).
-#
-# The default is unlimited and thus provide no protection against clients overwhelming the server. You are
-# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
-# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
-#
-# rpc_min_threads: 16
-# rpc_max_threads: 2048
-
-# uncomment to set socket buffer sizes on rpc connections
-# rpc_send_buff_size_in_bytes:
-# rpc_recv_buff_size_in_bytes:
-
-# Frame size for thrift (maximum field length).
-thrift_framed_transport_size_in_mb: 15
-
-# The max length of a thrift message, including all fields and
-# internal thrift overhead.
-thrift_max_message_length_in_mb: 16
-
-# Set to true to have Cassandra create a hard link to each sstable
-# flushed or streamed locally in a backups/ subdirectory of the
-# Keyspace data.  Removing these links is the operator's
-# responsibility.
-incremental_backups: false
-
-# Whether or not to take a snapshot before each compaction.  Be
-# careful using this option, since Cassandra won't clean up the
-# snapshots for you.  Mostly useful if you're paranoid when there
-# is a data format change.
-snapshot_before_compaction: false
-
-# Whether or not a snapshot is taken of the data before keyspace truncation
-# or dropping of column families. The STRONGLY advised default of true 
-# should be used to provide data safety. If you set this flag to false, you will
-# lose data on truncation or drop.
-auto_snapshot: true
-
-# Add column indexes to a row after its contents reach this size.
-# Increase if your column values are large, or if you have a very large
-# number of columns.  The competing causes are, Cassandra has to
-# deserialize this much of the row to read a single column, so you want
-# it to be small - at least if you do many partial-row reads - but all
-# the index data is read for each access, so you don't want to generate
-# that wastefully either.
-column_index_size_in_kb: 64
-
-# Size limit for rows being compacted in memory.  Larger rows will spill
-# over to disk and use a slower two-pass compaction process.  A message
-# will be logged specifying the row key.
-in_memory_compaction_limit_in_mb: 64
-
-# Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair.  Simultaneous
-# compactions can help preserve read performance in a mixed read/write
-# workload, by mitigating the tendency of small sstables to accumulate
-# during a single long running compactions. The default is usually
-# fine and if you experience problems with compaction running too
-# slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
-#
-# concurrent_compactors defaults to the number of cores.
-# Uncomment to make compaction mono-threaded, the pre-0.8 default.
-#concurrent_compactors: 1
-
-# Multi-threaded compaction. When enabled, each compaction will use
-# up to one thread per core, plus one thread per sstable being merged.
-# This is usually only useful for SSD-based hardware: otherwise, 
-# your concern is usually to get compaction to do LESS i/o (see:
-# compaction_throughput_mb_per_sec), not more.
-multithreaded_compaction: false
-
-# Throttles compaction to the given total throughput across the entire
-# system. The faster you insert data, the faster you need to compact in
-# order to keep the sstable count down, but in general, setting this to
-# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling. Note that this account for all types
-# of compaction, including validation compaction.
-compaction_throughput_mb_per_sec: 16
-
-# Track cached row keys during compaction, and re-cache their new
-# positions in the compacted sstable.  Disable if you use really large
-# key caches.
-compaction_preheat_key_cache: true
-
-# Throttles all outbound streaming file transfers on this node to the
-# given total throughput in Mbps. This is necessary because Cassandra does
-# mostly sequential IO when streaming data during bootstrap or repair, which
-# can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 400 Mbps or 50 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 400
-
-# How long the coordinator should wait for read operations to complete
-read_request_timeout_in_ms: 10000
-# How long the coordinator should wait for seq or index scans to complete
-range_request_timeout_in_ms: 10000
-# How long the coordinator should wait for writes to complete
-write_request_timeout_in_ms: 10000
-# How long the coordinator should wait for truncates to complete
-# (This can be much longer, because unless auto_snapshot is disabled
-# we need to flush first so we can snapshot before removing the data.)
-truncate_request_timeout_in_ms: 60000
-# The default timeout for other, miscellaneous operations
-request_timeout_in_ms: 10000
-
-# Enable operation timeout information exchange between nodes to accurately
-# measure request timeouts, If disabled cassandra will assuming the request
-# was forwarded to the replica instantly by the coordinator
-#
-# Warning: before enabling this property make sure to ntp is installed
-# and the times are synchronized between the nodes.
-cross_node_timeout: false
-
-# Enable socket timeout for streaming operation.
-# When a timeout occurs during streaming, streaming is retried from the start
-# of the current file. This *can* involve re-streaming an important amount of
-# data, so you should avoid setting the value too low.
-# Default value is 0, which never timeout streams.
-# streaming_socket_timeout_in_ms: 0
-
-# phi value that must be reached for a host to be marked down.
-# most users should never need to adjust this.
-# phi_convict_threshold: 8
-
-# endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch.  The snitch has two functions:
-# - it teaches Cassandra enough about your network topology to route
-#   requests efficiently
-# - it allows Cassandra to spread replicas around your cluster to avoid
-#   correlated failures. It does this by grouping machines into
-#   "datacenters" and "racks."  Cassandra will do its best not to have
-#   more than one replica on the same "rack" (which may not actually
-#   be a physical location)
-#
-# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
-# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
-# ARE PLACED.
-#
-# Out of the box, Cassandra provides
-#  - SimpleSnitch:
-#    Treats Strategy order as proximity. This improves cache locality
-#    when disabling read repair, which can further improve throughput.
-#    Only appropriate for single-datacenter deployments.
-#  - PropertyFileSnitch:
-#    Proximity is determined by rack and data center, which are
-#    explicitly configured in cassandra-topology.properties.
-#  - GossipingPropertyFileSnitch
-#    The rack and datacenter for the local node are defined in
-#    cassandra-rackdc.properties and propagated to other nodes via gossip.  If
-#    cassandra-topology.properties exists, it is used as a fallback, allowing
-#    migration from the PropertyFileSnitch.
-#  - RackInferringSnitch:
-#    Proximity is determined by rack and data center, which are
-#    assumed to correspond to the 3rd and 2nd octet of each node's
-#    IP address, respectively.  Unless this happens to match your
-#    deployment conventions (as it did Facebook's), this is best used
-#    as an example of writing a custom Snitch class.
-#  - Ec2Snitch:
-#    Appropriate for EC2 deployments in a single Region.  Loads Region
-#    and Availability Zone information from the EC2 API. The Region is
-#    treated as the Datacenter, and the Availability Zone as the rack.
-#    Only private IPs are used, so this will not work across multiple
-#    Regions.
-#  - Ec2MultiRegionSnitch:
-#    Uses public IPs as broadcast_address to allow cross-region
-#    connectivity.  (Thus, you should set seed addresses to the public
-#    IP as well.) You will need to open the storage_port or
-#    ssl_storage_port on the public IP firewall.  (For intra-Region
-#    traffic, Cassandra will switch to the private IP after
-#    establishing a connection.)
-#
-# You can use a custom Snitch by setting this to the full class name
-# of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: SimpleSnitch
-
-# controls how often to perform the more expensive part of host score
-# calculation
-dynamic_snitch_update_interval_in_ms: 100 
-# controls how often to reset all host scores, allowing a bad host to
-# possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
-# if set greater than zero and read_repair_chance is < 1.0, this will allow
-# 'pinning' of replicas to hosts in order to increase cache capacity.
-# The badness threshold will control how much worse the pinned host has to be
-# before the dynamic snitch will prefer other replicas over it.  This is
-# expressed as a double which represents a percentage.  Thus, a value of
-# 0.2 means Cassandra would continue to prefer the static snitch values
-# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.1
-
-# request_scheduler -- Set this to a class that implements
-# RequestScheduler, which will schedule incoming client requests
-# according to the specific policy. This is useful for multi-tenancy
-# with a single Cassandra cluster.
-# NOTE: This is specifically for requests from the client and does
-# not affect inter node communication.
-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
-# client requests to a node with a separate queue for each
-# request_scheduler_id. The scheduler is further customized by
-# request_scheduler_options as described below.
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-# Scheduler Options vary based on the type of scheduler
-# NoScheduler - Has no options
-# RoundRobin
-#  - throttle_limit -- The throttle_limit is the number of in-flight
-#                      requests per client.  Requests beyond 
-#                      that limit are queued up until
-#                      running requests can complete.
-#                      The value of 80 here is twice the number of
-#                      concurrent_reads + concurrent_writes.
-#  - default_weight -- default_weight is optional and allows for
-#                      overriding the default which is 1.
-#  - weights -- Weights are optional and will default to 1 or the
-#               overridden default_weight. The weight translates into how
-#               many requests are handled during each turn of the
-#               RoundRobin, based on the scheduler id.
-#
-# request_scheduler_options:
-#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-# request_scheduler_id -- An identifer based on which to perform
-# the request scheduling. Currently the only valid option is keyspace.
-# request_scheduler_id: keyspace
-
-# index_interval controls the sampling of entries from the primrary
-# row index in terms of space versus time.  The larger the interval,
-# the smaller and less effective the sampling will be.  In technicial
-# terms, the interval coresponds to the number of index entries that
-# are skipped between taking each sample.  All the sampled entries
-# must fit in memory.  Generally, a value between 128 and 512 here
-# coupled with a large key cache size on CFs results in the best trade
-# offs.  This value is not often changed, however if you have many
-# very small rows (many to an OS page), then increasing this will
-# often lower memory usage without a impact on performance.
-index_interval: 128
-
-# Enable or disable inter-node encryption
-# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
-# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
-# suite for authentication, key exchange and encryption of the actual data transfers.
-# NOTE: No custom encryption options are enabled at the moment
-# The available internode options are : all, none, dc, rack
-#
-# If set to dc cassandra will encrypt the traffic between the DCs
-# If set to rack cassandra will encrypt the traffic between the racks
-#
-# The passwords used in these options must match the passwords used when generating
-# the keystore and truststore.  For instructions on generating these files, see:
-# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
-#
-server_encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
-
-# enable or disable client/server encryption.
-client_encryption_options:
-    enabled: false
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
-
-# internode_compression controls whether traffic between nodes is
-# compressed.
-# can be:  all  - all traffic is compressed
-#          dc   - traffic between different datacenters is compressed
-#          none - nothing is compressed.
-internode_compression: all
diff --git a/conf/hazelcast.titan b/conf/hazelcast.titan
deleted file mode 100644
index d4719fa..0000000
--- a/conf/hazelcast.titan
+++ /dev/null
@@ -1,2 +0,0 @@
-storage.backend=hazelcastcache
-storage.directory=/tmp/cache
diff --git a/conf/onos-embedded.properties b/conf/onos-embedded.properties
deleted file mode 100644
index ec7a78f..0000000
--- a/conf/onos-embedded.properties
+++ /dev/null
@@ -1,17 +0,0 @@
-floodlight.modules = net.floodlightcontroller.core.FloodlightProvider,\
-net.floodlightcontroller.threadpool.ThreadPool,\
-net.onrc.onos.ofcontroller.floodlightlistener.RCNetworkGraphPublisher, \
-net.floodlightcontroller.ui.web.StaticWebRoutable,\
-net.onrc.onos.datagrid.HazelcastDatagrid,\
-net.onrc.onos.ofcontroller.flowmanager.FlowManager,\
-net.onrc.onos.ofcontroller.flowprogrammer.FlowProgrammer,\
-net.onrc.onos.ofcontroller.topology.TopologyManager,\
-net.onrc.onos.registry.controller.ZookeeperRegistry
-net.floodlightcontroller.restserver.RestApiServer.port = 8080
-net.floodlightcontroller.core.FloodlightProvider.openflowport = 6633
-net.floodlightcontroller.core.FloodlightProvider.workerthreads = 48
-net.floodlightcontroller.forwarding.Forwarding.idletimeout = 5
-net.floodlightcontroller.forwarding.Forwarding.hardtimeout = 0
-net.onrc.onos.ofcontroller.flowmanager.FlowManager.reuseDatabaseFlowPath = false
-#net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher.dbconf = conf/titan-embedded.properties
-net.onrc.onos.datagrid.HazelcastDatagrid.datagridConfig = conf/hazelcast.xml
diff --git a/conf/onos.properties b/conf/onos.properties
index ee2c9c9..3dfa462 100644
--- a/conf/onos.properties
+++ b/conf/onos.properties
@@ -15,7 +15,6 @@
 net.floodlightcontroller.forwarding.Forwarding.idletimeout = 5
 net.floodlightcontroller.forwarding.Forwarding.hardtimeout = 0
 net.onrc.onos.ofcontroller.flowmanager.FlowManager.reuseDatabaseFlowPath = false
-#net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher.dbconf = /tmp/cassandra.titan
 net.onrc.onos.datagrid.HazelcastDatagrid.datagridConfig = conf/hazelcast.xml
 #net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher.dbconf = /tmp/ramcloud.conf
 #net.onrc.onos.ofcontroller.floodlightlistener.NetworkGraphPublisher.graph_db_store = ramcloud
diff --git a/conf/titan-embedded.properties b/conf/titan-embedded.properties
deleted file mode 100644
index 7a28afb..0000000
--- a/conf/titan-embedded.properties
+++ /dev/null
@@ -1,6 +0,0 @@
-storage.backend=embeddedcassandra
-storage.cassandra-config-dir=file:conf/cassandra.yaml
-storage.keyspace=onos
-storage.replication-factor=1
-storage.write-consistency-level=ALL
-storage.read-consistency-level=ONE
diff --git a/src/main/resources/cassandra.titan b/src/main/resources/cassandra.titan
deleted file mode 100644
index ef6f3ae..0000000
--- a/src/main/resources/cassandra.titan
+++ /dev/null
@@ -1,3 +0,0 @@
-storage.backend=cassandra
-storage.hostname=localhost
-storage.keyspace=onos
diff --git a/start-cassandra.sh b/start-cassandra.sh
deleted file mode 100755
index 3e9a8d2..0000000
--- a/start-cassandra.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-# Set paths
-ONOS_HOME=`dirname $0`
-CASSANDRA_DIR=${HOME}/apache-cassandra-1.2.4
-LOGDIR=${ONOS_HOME}/onos-logs
-CASSANDRA_LOG=${LOGDIR}/cassandara.`hostname`.log
-
-function lotate {
-    logfile=$1
-    nr_max=${2:-10}
-    if [ -f $logfile ]; then
-	for i in `seq $(expr $nr_max - 1) -1 1`; do
-	    if [ -f ${logfile}.${i} ]; then
-		mv -f ${logfile}.${i} ${logfile}.`expr $i + 1`
-	    fi
-	done
-	mv $logfile $logfile.1
-    fi
-}
-
-function start {
-  if [ ! -d ${LOGDIR} ]; then
-    mkdir -p ${LOGDIR}
-  fi
-  echo "rotate log: $log"
-  if [ -f $CASSANDRA_LOG ]; then
-    lotate $CASSANDRA_LOG
-  fi
-
-  # Run cassandra 
-  echo "Starting cassandra"
-#  echo "[WARNING] This script copies conf/cassandra.yaml to $CASSANDRA_DIR/conf/cassandra.yaml (overwrites)"
-#  echo "original cassandra.yaml was backed up as cassandra.yaml.backup"
-#  id=`hostid`
-#  cp ${CASSANDRA_DIR}/conf/cassandra.yaml $CASSANDRA_DIR/conf/cassandra.yaml.backup
-#  cp ${ONOS_HOME}/conf/cassandra.yaml.${id} $CASSANDRA_DIR/conf
-  $CASSANDRA_DIR/bin/cassandra > $CASSANDRA_LOG 2>&1 
-}
-
-function stop {
-  # Kill the existing processes
-  capid=`ps -edalf |grep java |grep apache-cassandra | awk '{print $4}'`
-  pids="$capid"
-  for p in ${pids}; do
-    if [ x$p != "x" ]; then
-      kill -KILL $p
-      echo "Killed existing prosess (pid: $p)"
-    fi
-  done
-}
-
-function deldb {
-#   # Delete the berkeley db database
-   if [ -d "/tmp/cassandra.titan" ]; then
-      echo "deleting berkeley db dir"
-      sudo rm -rf /tmp/cassandra.titan
-   fi
-}
-
-case "$1" in
-  start)
-    deldb
-    cp $ONOS_HOME/conf/cassandra.titan /tmp
-    stop
-    start 
-    ;;
-  stop)
-    stop
-    ;;
-#  deldb)
-#    deldb
-#    ;;
-  status)
-    n=`ps -edalf |grep java |grep apache-cassandra | wc -l`
-    echo "$n instance of cassandra running"
-    $CASSANDRA_DIR/bin/nodetool ring 
-    ;;
-  *)
-    echo "Usage: $0 {start|stop|restart|status}"
-    exit 1
-esac
diff --git a/titan/cassandra.local b/titan/cassandra.local
deleted file mode 100644
index 48955ca..0000000
--- a/titan/cassandra.local
+++ /dev/null
@@ -1,3 +0,0 @@
-storage.backend=cassandrathrift
-storage.hostname=127.0.0.1
-storage.keyspace=onos
\ No newline at end of file
diff --git a/titan/gremlin.sh b/titan/gremlin.sh
deleted file mode 100755
index a340dd7..0000000
--- a/titan/gremlin.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-
-if [ -z "${MVN}" ]; then
-    MVN="mvn"
-fi
-
-BASE_DIR=`dirname $0`
-ONOS_DIR="`dirname $0`/.."
-
-# Use a python script to parse the classpath out of the .classpath file
-if [ ! -f ${ONOS_DIR}/.javacp ]; then
-  echo "execute mvn compile at ONOS HOME directory."
-  exit 1
-fi
-CP=`cat ${ONOS_DIR}/.javacp`
-CP="${CP}:${ONOS_DIR}/target/classes"
-
-if [[ "$CP" == *"Error reading classpath file"* ]]; then
-    echo $CP
-    exit 1
-fi
-
-# Find Java 
-if [ "$JAVA_HOME" = "" ] ; then
-    JAVA="java -server"
-else
-    JAVA="$JAVA_HOME/bin/java -server"
-fi
-
-# Set Java options
-if [ "$JAVA_OPTIONS" = "" ] ; then
-    JAVA_OPTIONS="-Xms32m -Xmx512m"
-fi
-
-# Launch the application 
-if [ "$1" = "-e" ]; then
-  k=$2
-  if [ $# -gt 2 ]; then
-    for (( i=3 ; i < $# + 1 ; i++ ))
-    do
-      eval a=\$$i
-      k="$k \"$a\""
-    done
-  fi
-
-  eval $JAVA $JAVA_OPTIONS -cp $CP:$CLASSPATH com.thinkaurelius.titan.tinkerpop.gremlin.ScriptExecutor $k
-else
-  if [ "$1" = "-v" ]; then
-    $JAVA $JAVA_OPTIONS -cp $CP:$CLASSPATH com.tinkerpop.gremlin.Version
-  else
-    pushd $BASE_DIR >/dev/null
-    $JAVA $JAVA_OPTIONS -cp $CP:$CLASSPATH com.thinkaurelius.titan.tinkerpop.gremlin.Console
-    popd >/dev/null
-  fi
-fi
-
-# Return the program's exit code 
-exit $?
diff --git a/titan/listDevices b/titan/listDevices
deleted file mode 100644
index ea57f41..0000000
--- a/titan/listDevices
+++ /dev/null
@@ -1,2 +0,0 @@
-g.stopTransaction(SUCCESS);
-g.V('type', 'device').map;
\ No newline at end of file
diff --git a/titan/listFlow b/titan/listFlow
deleted file mode 100644
index 6b23be5..0000000
--- a/titan/listFlow
+++ /dev/null
@@ -1,2 +0,0 @@
-g.stopTransaction(SUCCESS);
-g.V('type', 'flow').map;
diff --git a/titan/listFlowEntry b/titan/listFlowEntry
deleted file mode 100644
index 42dc50f..0000000
--- a/titan/listFlowEntry
+++ /dev/null
@@ -1,2 +0,0 @@
-g.stopTransaction(SUCCESS);
-g.V('type', 'flow_entry').map;
diff --git a/titan/listNotUpdated b/titan/listNotUpdated
deleted file mode 100644
index a44c458..0000000
--- a/titan/listNotUpdated
+++ /dev/null
@@ -1,4 +0,0 @@
-g.stopTransaction(SUCCESS)
-g.V('type', 'flow_entry').each{
-if (it.switch_state.equals("FE_SWITCH_NOT_UPDATED")) println it.map.next()
-}
\ No newline at end of file
diff --git a/titan/listP b/titan/listP
deleted file mode 100644
index e318f7d..0000000
--- a/titan/listP
+++ /dev/null
@@ -1,2 +0,0 @@
-g.stopTransaction(SUCCESS);
-g.V('type', 'port').map;
\ No newline at end of file
diff --git a/titan/listS b/titan/listS
deleted file mode 100644
index c7e796c..0000000
--- a/titan/listS
+++ /dev/null
@@ -1,2 +0,0 @@
-g.stopTransaction(SUCCESS);
-g.V('type', 'switch').map;
\ No newline at end of file
diff --git a/titan/open b/titan/open
deleted file mode 100644
index 6efe890..0000000
--- a/titan/open
+++ /dev/null
@@ -1 +0,0 @@
-g = TitanFactory.open('/tmp/cassandra.titan')
\ No newline at end of file
diff --git a/titan/schema/dbapi-testdata.graphml b/titan/schema/dbapi-testdata.graphml
deleted file mode 100644
index 952a62a..0000000
--- a/titan/schema/dbapi-testdata.graphml
+++ /dev/null
@@ -1 +0,0 @@
-<?xml version=\"1.0\" ?><graphml xmlns=\"http://graphml.graphdrawing.org/xmlns\">    <key id=\"id\" for=\"node\" attr.name=\"id\" attr.type=\"string\"></key>    <key id=\"type\" for=\"node\" attr.name=\"type\" attr.type=\"string\"></key>    <key id=\"dpid\" for=\"node\" attr.name=\"dpid\" attr.type=\"string\"></key>    <key id=\"desc\" for=\"node\" attr.name=\"desc\" attr.type=\"string\"></key>    <key id=\"number\" for=\"node\" attr.name=\"number\" attr.type=\"int\"></key>    <key id=\"dl_add\" for=\"node\" attr.name=\"dl_addr\" attr.type=\"string\"></key>    <key id=\"nw_addr\" for=\"node\" attr.name=\"nw_addr\" attr.type=\"string\"></key>    <key id=\"id\" for=\"edge\" attr.name=\"id\" attr.type=\"string\"></key>    <key id=\"source\" for=\"edge\" attr.name=\"source\" attr.type=\"string\"></key>    <key id=\"target\" for=\"edge\" attr.name=\"target\" attr.type=\"string\"></key>    <key id=\"label\" for=\"edge\" attr.name=\"label\" attr.type=\"string\"></key>    <graph id=\"G\" edgedefault=\"directed\">        <node id=\"1\">            <data key=\"type\">switch</data>            <data key=\"dpid\">00:00:00:00:00:00:0a:01</data>            <data key=\"desc\">OpenFlow Switch at SEA</data>        </node>        <node id=\"2\">            <data key=\"type\">switch</data>            <data key=\"dpid\">00:00:00:00:00:00:0a:02</data>            <data key=\"desc\">OpenFlow Switch at LAX</data>        </node>        <node id=\"3\">            <data key=\"type\">switch</data>            <data key=\"dpid\">00:00:00:00:00:00:0a:03</data>            <data key=\"desc\">OpenFlow Switch at CHI</data>        </node>        <node id=\"4\">            <data key=\"type\">switch</data>            <data key=\"dpid\">00:00:00:00:00:00:0a:04</data>            <data key=\"desc\">OpenFlow Switch at IAH</data>        </node>        <node id=\"5\">            <data key=\"type\">switch</data>            <data key=\"dpid\">00:00:00:00:00:00:0a:05</data>            <data key=\"desc\">OpenFlow Switch at NYC</data>        </node>        <node id=\"6\">            <data key=\"type\">switch</data>            <data key=\"dpid\">00:00:00:00:00:00:0a:06</data>            <data key=\"desc\">OpenFlow Switch at ATL</data>        </node>        <node id=\"100\">            <data key=\"type\">port</data>            <data key=\"number\">1</data>            <data key=\"desc\">port 1 at SEA Switch</data>        </node>        <node id=\"101\">            <data key=\"type\">port</data>            <data key=\"number\">2</data>            <data key=\"desc\">port 2 at SEA Switch</data>        </node>        <node id=\"102\">            <data key=\"type\">port</data>            <data key=\"number\">3</data>            <data key=\"desc\">port 3 at SEA Switch</data>        </node>        <node id=\"103\">            <data key=\"type\">port</data>            <data key=\"number\">4</data>            <data key=\"desc\">port 4 at SEA Switch</data>        </node>        <node id=\"104\">            <data key=\"type\">port</data>            <data key=\"number\">1</data>            <data key=\"desc\">port 1 at LAX Switch</data>        </node>        <node id=\"105\">            <data key=\"type\">port</data>            <data key=\"number\">2</data>            <data key=\"desc\">port 2 at LAX Switch</data>        </node>        <node id=\"106\">            <data key=\"type\">port</data>            <data key=\"number\">3</data>            <data key=\"desc\">port 3 at LAX Switch</data>        </node>        <node id=\"107\">            <data key=\"type\">port</data>            <data key=\"number\">1</data>            <data key=\"desc\">port 1 at CHI Switch</data>        </node>        <node id=\"108\">            <data key=\"type\">port</data>            <data key=\"number\">2</data>            <data key=\"desc\">port 2 at CHI Switch</data>        </node>        <node id=\"109\">            <data key=\"type\">port</data>            <data key=\"number\">3</data>            <data key=\"desc\">port 3 at CHI Switch</data>        </node>        <node id=\"110\">            <data key=\"type\">port</data>            <data key=\"number\">4</data>            <data key=\"desc\">port 4 at CHI Switch</data>        </node>        <node id=\"111\">            <data key=\"type\">port</data>            <data key=\"number\">1</data>            <data key=\"desc\">port 1 at IAH Switch</data>        </node>        <node id=\"112\">            <data key=\"type\">port</data>            <data key=\"number\">2</data>            <data key=\"desc\">port 2 at IAH Switch</data>        </node>        <node id=\"113\">            <data key=\"type\">port</data>            <data key=\"number\">3</data>            <data key=\"desc\">port 3 at IAH Switch</data>        </node>        <node id=\"114\">            <data key=\"type\">port</data>            <data key=\"number\">1</data>            <data key=\"desc\">port 1 at NYC Switch</data>        </node>        <node id=\"115\">            <data key=\"type\">port</data>            <data key=\"number\">2</data>            <data key=\"desc\">port 2 at NYC Switch</data>        </node>        <node id=\"116\">            <data key=\"type\">port</data>            <data key=\"number\">3</data>            <data key=\"desc\">port 3 at NYC Switch</data>        </node>        <node id=\"117\">            <data key=\"type\">port</data>            <data key=\"number\">1</data>            <data key=\"desc\">port 1 at ATL Switch</data>        </node>        <node id=\"118\">            <data key=\"type\">port</data>            <data key=\"number\">2</data>            <data key=\"desc\">port 2 at ATL Switch</data>        </node>        <node id=\"119\">            <data key=\"type\">port</data>            <data key=\"number\">3</data>            <data key=\"desc\">port 3 at ATL Switch</data>        </node>        <node id=\"1000\">            <data key=\"type\">device</data>            <data key=\"dl_addr\">20:c9:d0:4a:e1:73</data>            <data key=\"nw_addr\">192.168.10.101</data>        </node>        <node id=\"1001\">            <data key=\"type\">device</data>            <data key=\"dl_addr\">20:c9:d0:4a:e1:62</data>            <data key=\"nw_addr\">192.168.20.101</data>        </node>        <node id=\"1002\">            <data key=\"type\">device</data>            <data key=\"dl_addr\">10:40:f3:e6:8d:55</data>            <data key=\"nw_addr\">192.168.10.1</data>        </node>        <node id=\"1003\">            <data key=\"type\">device</data>            <data key=\"dl_addr\">a0:b3:cc:9c:c6:88</data>            <data key=\"nw_addr\">192.168.20.1</data>        </node>        <node id=\"1004\">            <data key=\"type\">device</data>            <data key=\"dl_addr\">00:04:20:e2:50:a2</data>            <data key=\"nw_addr\">192.168.30.1</data>        </node>        <node id=\"1005\">            <data key=\"type\">device</data>            <data key=\"dl_addr\">58:55:ca:c4:1b:a0</data>            <data key=\"nw_addr\">192.168.40.1</data>        </node>        <edge id=\"10000\" source=\"1\" target=\"101\" label=\"on\"></edge>        <edge id=\"10001\" source=\"1\" target=\"102\" label=\"on\"></edge>        <edge id=\"10002\" source=\"1\" target=\"103\" label=\"on\"></edge>        <edge id=\"10003\" source=\"2\" target=\"104\" label=\"on\"></edge>        <edge id=\"10004\" source=\"2\" target=\"105\" label=\"on\"></edge>        <edge id=\"10005\" source=\"2\" target=\"106\" label=\"on\"></edge>        <edge id=\"10006\" source=\"3\" target=\"107\" label=\"on\"></edge>        <edge id=\"10007\" source=\"3\" target=\"108\" label=\"on\"></edge>        <edge id=\"10008\" source=\"3\" target=\"109\" label=\"on\"></edge>        <edge id=\"10009\" source=\"3\" target=\"110\" label=\"on\"></edge>        <edge id=\"10010\" source=\"4\" target=\"111\" label=\"on\"></edge>        <edge id=\"10011\" source=\"4\" target=\"112\" label=\"on\"></edge>        <edge id=\"10012\" source=\"4\" target=\"113\" label=\"on\"></edge>        <edge id=\"10013\" source=\"5\" target=\"114\" label=\"on\"></edge>        <edge id=\"10014\" source=\"5\" target=\"115\" label=\"on\"></edge>        <edge id=\"10015\" source=\"5\" target=\"116\" label=\"on\"></edge>        <edge id=\"10016\" source=\"6\" target=\"117\" label=\"on\"></edge>        <edge id=\"10017\" source=\"6\" target=\"118\" label=\"on\"></edge>        <edge id=\"10018\" source=\"6\" target=\"119\" label=\"on\"></edge>        <edge id=\"11000\" source=\"101\" target=\"107\" label=\"link\"></edge>         <edge id=\"11003\" source=\"105\" target=\"111\" label=\"link\"></edge>        <edge id=\"11004\" source=\"107\" target=\"101\" label=\"link\"></edge>        <edge id=\"11005\" source=\"108\" target=\"112\" label=\"link\"></edge>        <edge id=\"11006\" source=\"109\" target=\"114\" label=\"link\"></edge>        <edge id=\"11007\" source=\"111\" target=\"105\" label=\"link\"></edge>        <edge id=\"11008\" source=\"112\" target=\"108\" label=\"link\"></edge>        <edge id=\"11009\" source=\"113\" target=\"117\" label=\"link\"></edge>        <edge id=\"11010\" source=\"114\" target=\"109\" label=\"link\"></edge>        <edge id=\"11011\" source=\"115\" target=\"118\" label=\"link\"></edge>        <edge id=\"11012\" source=\"117\" target=\"113\" label=\"link\"></edge>        <edge id=\"11013\" source=\"118\" target=\"115\" label=\"link\"></edge>        <edge id=\"12000\" source=\"103\" target=\"1000\" label=\"host\"></edge>        <edge id=\"12001\" source=\"103\" target=\"1001\" label=\"host\"></edge>        <edge id=\"12002\" source=\"110\" target=\"1002\" label=\"host\"></edge>        <edge id=\"12003\" source=\"116\" target=\"1003\" label=\"host\"></edge>        <edge id=\"12004\" source=\"106\" target=\"1004\" label=\"host\"></edge>        <edge id=\"12005\" source=\"119\" target=\"1005\" label=\"host\"></edge>      </graph>    </graphml>
diff --git a/titan/schema/schema.xml b/titan/schema/schema.xml
deleted file mode 100644
index da01a7f..0000000
--- a/titan/schema/schema.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0" ?>
-<graphml xmlns="http://graphml.graphdrawing.org/xmlns">
-    <key id="name" for="node" attr.name="name" attr.type="string"></key>
-    <key id="type" for="node" attr.name="type" attr.type="string"></key>
-    <key id="id" for="node" attr.name="id" attr.type="string"></key>
-    <key id="time" for="edge" attr.name="time" attr.type="int"></key>
-    <graph id="G" edgedefault="directed">
-        <node id="13">
-            <data key="name">dpid1-port3</data>
-            <data key="number">3</data>
-            <data key="type">port</data>
-        </node>
-        <node id="2">
-            <data key="name">dpid2</data>
-            <data key="dpid">2</data>
-            <data key="type">switch</data>
-        </node>
-        <node id="110">
-            <data key="name">dpid1-port10</data>
-            <data key="number">10</data>
-            <data key="type">port</data>
-        </node>
-        <node id="1">
-            <data key="name">dpid1</data>
-            <data key="dpid">1</data>
-            <data key="type">switch</data>
-        </node>
-        <node id="210">
-            <data key="name">dpid2-port10</data>
-            <data key="number">10</data>
-            <data key="type">port</data>
-        </node>
-        <node id="3">
-            <data key="name">dpid3</data>
-            <data key="dpid">3</data>
-            <data key="type">switch</data>
-        </node>
-        <node id="6">
-            <data key="name">stanford</data>
-            <data key="type">location</data>
-        </node>
-        <node id="5">
-            <data key="name">berkeley</data>
-            <data key="type">location</data>
-        </node>
-        <node id="4">
-            <data key="name">onlab</data>
-            <data key="type">location</data>
-        </node>
-        <node id="39">
-            <data key="name">dpid3-port9</data>
-            <data key="number">10</data>
-            <data key="type">port</data>
-        </node>
-        <node id="8">
-            <data key="name">MAC:1234</data>
-            <data key="type">device</data>
-        </node>
-        <node id="11">
-            <data key="name">dpid1-port1</data>
-            <data key="number">1</data>
-            <data key="type">port</data>
-        </node>
-        <edge id="523" source="1" target="13" label="on"></edge>
-        <edge id="521" source="1" target="110" label="on"></edge>
-        <edge id="517" source="2" target="210" label="on"></edge>
-        <edge id="518" source="3" target="39" label="on"></edge>
-        <edge id="520" source="13" target="8" label="host"></edge>
-        <edge id="519" source="110" target="210" label="link"></edge>
-        <edge id="515" source="2" target="6" label="at"></edge>
-        <edge id="513" source="1" target="4" label="at"></edge>
-        <edge id="516" source="3" target="5" label="at"></edge>
-        <edge id="776" source="1" target="13" label="time_checked">
-            <data key="time">1</data>
-        </edge>
-        <edge id="27" source="110" target="210" label="expired">
-            <data key="time">2</data>
-        </edge>
-    </graph>
-</graphml>
diff --git a/titan/schema/test-network.xml b/titan/schema/test-network.xml
deleted file mode 100644
index 2c3b993..0000000
--- a/titan/schema/test-network.xml
+++ /dev/null
@@ -1,264 +0,0 @@
-<?xml version="1.0" ?>
-<graphml xmlns="http://graphml.graphdrawing.org/xmlns">
-
-    <key id="id" for="node" attr.name="id" attr.type="string"></key>
-    <key id="type" for="node" attr.name="type" attr.type="string"></key>
-    <key id="state" for="node" attr.name="state" attr.type="string"></key>
-    <key id="dpid" for="node" attr.name="dpid" attr.type="string"></key>
-    <key id="desc" for="node" attr.name="desc" attr.type="string"></key>
-    <key id="number" for="node" attr.name="number" attr.type="int"></key>
-    <key id="dl_addr" for="node" attr.name="dl_addr" attr.type="string"></key>
-    <key id="nw_addr" for="node" attr.name="nw_addr" attr.type="string"></key>
-    <key id="id" for="edge" attr.name="id" attr.type="string"></key>
-    <key id="source" for="edge" attr.name="source" attr.type="string"></key>
-    <key id="target" for="edge" attr.name="target" attr.type="string"></key>
-    <key id="label" for="edge" attr.name="label" attr.type="string"></key>
-
-    <graph id="G" edgedefault="directed">
-        <node id="1">
-            <data key="type">switch</data>
-            <data key="dpid">00:00:00:00:00:00:0a:01</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">OpenFlow Switch at SEA</data>
-        </node>
-        <node id="2">
-            <data key="type">switch</data>
-            <data key="dpid">00:00:00:00:00:00:0a:02</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">OpenFlow Switch at LAX</data>
-        </node>
-        <node id="3">
-            <data key="type">switch</data>
-            <data key="dpid">00:00:00:00:00:00:0a:03</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">OpenFlow Switch at CHI</data>
-        </node>
-        <node id="4">
-            <data key="type">switch</data>
-            <data key="dpid">00:00:00:00:00:00:0a:04</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">OpenFlow Switch at IAH</data>
-        </node>
-        <node id="5">
-            <data key="type">switch</data>
-            <data key="dpid">00:00:00:00:00:00:0a:05</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">OpenFlow Switch at NYC</data>
-        </node>
-        <node id="6">
-            <data key="type">switch</data>
-            <data key="dpid">00:00:00:00:00:00:0a:06</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">OpenFlow Switch at ATL</data>
-        </node>
-
-        <node id="100">
-            <data key="type">port</data>
-            <data key="number">1</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 1 at SEA Switch</data>
-        </node>
-        <node id="101">
-            <data key="type">port</data>
-            <data key="number">2</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 2 at SEA Switch</data>
-        </node>
-        <node id="102">
-            <data key="type">port</data>
-            <data key="number">3</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 3 at SEA Switch</data>
-        </node>
-        <node id="103">
-            <data key="type">port</data>
-            <data key="number">4</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 4 at SEA Switch</data>
-        </node>
-
-        <node id="104">
-            <data key="type">port</data>
-            <data key="number">1</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 1 at LAX Switch</data>
-        </node>
-        <node id="105">
-            <data key="type">port</data>
-            <data key="number">2</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 2 at LAX Switch</data>
-        </node>
-        <node id="106">
-            <data key="type">port</data>
-            <data key="number">3</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 3 at LAX Switch</data>
-        </node>
-
-        <node id="107">
-            <data key="type">port</data>
-            <data key="number">1</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 1 at CHI Switch</data>
-        </node>
-        <node id="108">
-            <data key="type">port</data>
-            <data key="number">2</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 2 at CHI Switch</data>
-        </node>
-        <node id="109">
-            <data key="type">port</data>
-            <data key="number">3</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 3 at CHI Switch</data>
-        </node>
-        <node id="110">
-            <data key="type">port</data>
-            <data key="number">4</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 4 at CHI Switch</data>
-        </node>
-
-        <node id="111">
-            <data key="type">port</data>
-            <data key="number">1</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 1 at IAH Switch</data>
-        </node>
-        <node id="112">
-            <data key="type">port</data>
-            <data key="number">2</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 2 at IAH Switch</data>
-        </node>
-        <node id="113">
-            <data key="type">port</data>
-            <data key="number">3</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 3 at IAH Switch</data>
-        </node>
-
-        <node id="114">
-            <data key="type">port</data>
-            <data key="number">1</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 1 at NYC Switch</data>
-        </node>
-        <node id="115">
-            <data key="type">port</data>
-            <data key="number">2</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 2 at NYC Switch</data>
-        </node>
-        <node id="116">
-            <data key="type">port</data>
-            <data key="number">3</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 3 at NYC Switch</data>
-        </node>
-
-        <node id="117">
-            <data key="type">port</data>
-            <data key="number">1</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 1 at ATL Switch</data>
-        </node>
-        <node id="118">
-            <data key="type">port</data>
-            <data key="number">2</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 2 at ATL Switch</data>
-        </node>
-        <node id="119">
-            <data key="type">port</data>
-            <data key="number">3</data>
-            <data key="state">ACTIVE</data>
-            <data key="desc">port 3 at ATL Switch</data>
-        </node>
-
-        <node id="1000">
-            <data key="type">device</data>
-            <data key="dl_addr">20:c9:d0:4a:e1:73</data>
-            <data key="nw_addr">192.168.10.101</data>
-        </node>
-        <node id="1001">
-            <data key="type">device</data>
-            <data key="dl_addr">20:c9:d0:4a:e1:62</data>
-            <data key="nw_addr">192.168.20.101</data>
-        </node>
-        <node id="1002">
-            <data key="type">device</data>
-            <data key="dl_addr">10:40:f3:e6:8d:55</data>
-            <data key="nw_addr">192.168.10.1</data>
-        </node>
-        <node id="1003">
-            <data key="type">device</data>
-            <data key="dl_addr">a0:b3:cc:9c:c6:88</data>
-            <data key="nw_addr">192.168.20.1</data>
-        </node>
-        <node id="1004">
-            <data key="type">device</data>
-            <data key="dl_addr">00:04:20:e2:50:a2</data>
-            <data key="nw_addr">192.168.30.1</data>
-        </node>
-        <node id="1005">
-            <data key="type">device</data>
-            <data key="dl_addr">58:55:ca:c4:1b:a0</data>
-            <data key="nw_addr">192.168.40.1</data>
-        </node>
-
-        <edge id="10000" source="1" target="101" label="on"></edge>
-        <edge id="10001" source="1" target="102" label="on"></edge>
-        <edge id="10002" source="1" target="103" label="on"></edge>
-
-        <edge id="10003" source="2" target="104" label="on"></edge>
-        <edge id="10004" source="2" target="105" label="on"></edge>
-        <edge id="10005" source="2" target="106" label="on"></edge>
-
-        <edge id="10006" source="3" target="107" label="on"></edge>
-        <edge id="10007" source="3" target="108" label="on"></edge>
-        <edge id="10008" source="3" target="109" label="on"></edge>
-        <edge id="10009" source="3" target="110" label="on"></edge>
-
-        <edge id="10010" source="4" target="111" label="on"></edge>
-        <edge id="10011" source="4" target="112" label="on"></edge>
-        <edge id="10012" source="4" target="113" label="on"></edge>
-
-        <edge id="10013" source="5" target="114" label="on"></edge>
-        <edge id="10014" source="5" target="115" label="on"></edge>
-        <edge id="10015" source="5" target="116" label="on"></edge>
-
-        <edge id="10016" source="6" target="117" label="on"></edge>
-        <edge id="10017" source="6" target="118" label="on"></edge>
-        <edge id="10018" source="6" target="119" label="on"></edge>
-
-        <edge id="11000" source="101" target="107" label="link"></edge>
-        <!--<edge id="11001" source="102" target="104" label="link"></edge>-->
-
-        <edge id="11002" source="104" target="102" label="link"></edge>
-        <edge id="11003" source="105" target="111" label="link"></edge>
-
-        <edge id="11004" source="107" target="101" label="link"></edge>
-        <edge id="11005" source="108" target="112" label="link"></edge>
-        <edge id="11006" source="109" target="114" label="link"></edge>
-
-        <edge id="11007" source="111" target="105" label="link"></edge>
-        <edge id="11008" source="112" target="108" label="link"></edge>
-        <edge id="11009" source="113" target="117" label="link"></edge>
-
-        <edge id="11010" source="114" target="109" label="link"></edge>
-        <edge id="11011" source="115" target="118" label="link"></edge>
-
-        <edge id="11012" source="117" target="113" label="link"></edge>
-        <edge id="11013" source="118" target="115" label="link"></edge>
-
-        <edge id="12000" source="103" target="1000" label="host"></edge>
-        <edge id="12001" source="103" target="1001" label="host"></edge>
-        <edge id="12002" source="110" target="1002" label="host"></edge>
-        <edge id="12003" source="116" target="1003" label="host"></edge>
-        <edge id="12004" source="106" target="1004" label="host"></edge>
-        <edge id="12005" source="119" target="1005" label="host"></edge>
-    </graph>
-</graphml>
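
For context: the file removed above describes a sample six-switch topology (SEA, LAX, CHI, IAH, NYC, ATL) in GraphML, with "on" edges binding each switch to its ports, "link" edges joining ports across switches, and "host" edges attaching devices to ports. Below is a minimal sketch, not ONOS code, of how such a file could be inspected with the JDK's built-in DOM parser; the file name topology.graphml and the class name are assumptions for illustration only.

    import java.io.File;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class GraphmlTopologyDump {
        public static void main(String[] args) throws Exception {
            // Assumed local copy of the GraphML file deleted above.
            Document doc = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder()
                    .parse(new File("topology.graphml"));

            // Walk every <node> element and report the switches with their DPIDs.
            NodeList nodes = doc.getElementsByTagName("node");
            for (int i = 0; i < nodes.getLength(); i++) {
                Element node = (Element) nodes.item(i);
                String type = null, dpid = null, desc = null;
                NodeList data = node.getElementsByTagName("data");
                for (int j = 0; j < data.getLength(); j++) {
                    Element d = (Element) data.item(j);
                    switch (d.getAttribute("key")) {
                        case "type": type = d.getTextContent(); break;
                        case "dpid": dpid = d.getTextContent(); break;
                        case "desc": desc = d.getTextContent(); break;
                    }
                }
                if ("switch".equals(type)) {
                    System.out.println("switch " + node.getAttribute("id")
                            + " dpid=" + dpid + " (" + desc + ")");
                }
            }

            // Edges cover switch-to-port ("on"), port-to-port ("link"),
            // and port-to-device ("host") relations.
            System.out.println(doc.getElementsByTagName("edge").getLength()
                    + " edges total");
        }
    }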