Merge branch 'master' of https://github.com/OPENNETWORKINGLAB/ONOS
diff --git a/conf/README b/conf/README
new file mode 100644
index 0000000..f43d67b
--- /dev/null
+++ b/conf/README
@@ -0,0 +1 @@
+Please use onos-embedded.properties to start ONOS with embedded Cassandra.
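+
+A hedged example invocation (assuming the standard Floodlight launcher, which
+reads its module configuration via -cf; the jar path is illustrative):
+  java -jar target/floodlight.jar -cf conf/onos-embedded.properties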
diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml
new file mode 100644
index 0000000..277f03a
--- /dev/null
+++ b/conf/cassandra.yaml
@@ -0,0 +1,643 @@
+# Cassandra storage config YAML
+
+# NOTE:
+#   See http://wiki.apache.org/cassandra/StorageConfiguration for
+#   full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+# num_tokens: 256
+
+# If you haven't specified num_tokens, or have set it to the default of 1 then
+# you should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node.  If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+initial_token:
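+# A hedged worked example (illustrative, not a shipped default): with
+# RandomPartitioner, the usual recipe for an evenly balanced N-node ring
+# is to give node i the token i * (2**127 / N).  For a hypothetical
+# 3-node cluster:
+#   node 0: initial_token: 0
+#   node 1: initial_token: 56713727820156410577229101238628035242
+#   node 2: initial_token: 113427455640312821154458202477256070485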
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated.  After it has been dead this long, hints will be dropped.
+max_hint_window_in_ms: 10800000 # 3 hours
+# throttle in KB per second, per delivery thread
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# The following setting populates the page cache on memtable flush and compaction
+# WARNING: Enable this setting only when the whole node's data fits in memory.
+# Defaults to: false
+# populate_io_cache_on_flush: false
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
+
+# authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+authorizer: org.apache.cassandra.auth.AllowAllAuthorizer
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster.  Any IPartitioner may be used, including your
+# own as long as it is on the classpath.  Out of the box, Cassandra
+# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner,
+# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
+# 
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+#   This is the default prior to 1.2 and is retained for compatibility.
+# - Murmur3Partitioner is similar to RandomPartitioner but uses the Murmur3_128
+#   hash function instead of md5.  When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes.  BOP allows
+#   scanning rows in key order, but the ordering can generate hot spots
+#   for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP that stores
+#   keys in a less-efficient format and only works with keys that are
+#   UTF8-encoded Strings.
+# - CollatingOPP collates according to EN,US rules rather than lexical byte
+#   ordering.  Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.RandomPartitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+    - /tmp/cassandra/data
+
+# commit log
+commitlog_directory: /tmp/cassandra/commitlog
+
+# policy for data disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+#       still inspectable via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+#              remaining available sstables.  This means you WILL see obsolete
+#              data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
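+# A hedged worked example of the "auto" sizing above: with an 8 GB heap,
+# min(5% of 8192 MB, 100 MB) = min(409.6 MB, 100 MB) = 100 MB; with a
+# 1 GB heap it would be min(51.2 MB, 100 MB), i.e. about 51 MB.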
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint.  And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: /tmp/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch." 
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk.  It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments.  A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.  
+#
+# The default size is 32 MB, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+    # Addresses of hosts that are deemed contact points. 
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring.  You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          # Ex: "<ip1>,<ip2>,<ip3>"
+          - seeds: "127.0.0.1"
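+          # A hedged multi-node sketch (hypothetical addresses): every node
+          # in the cluster would list the same two or three seeds, e.g.
+          # - seeds: "192.168.10.11,192.168.10.12"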
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.  
+#
+# Set to 1.0 to disable.  Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_.  Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.  
+# 
+# Set to 1.0 to disable.  Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
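+# A hedged worked example of the rules of thumb above: a node with two
+# data drives and eight cores would use concurrent_reads: 32 (16 * 2)
+# and concurrent_writes: 64 (8 * 8); the stock 32/32 assumes modest
+# hardware.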
+
+# Total memory to use for memtables.  Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.  Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32 MB
+# on 32-bit JVMs, and 1024 MB on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.  So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the number of memtable flush writer threads.  These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the number of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread.  At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to fsync() at intervals during sequential writes, in order
+# to force the operating system to flush the dirty buffers. Enable
+# this to avoid sudden dirty-buffer flushing from impacting read
+# latencies. Almost always a good idea on SSDs; not necessarily on
+# spinning disks.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+storage_port: 7000
+
+# SSL port, for encrypted communication.  Unused unless enabled in
+# encryption_options
+ssl_storage_port: 7001
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+# 
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: localhost
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+
+# Whether to start the native transport server.
+# Currently, only the thrift server is started by default because the native
+# transport is considered beta.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: false
+# port for the CQL native transport to listen for clients on
+native_transport_port: 9042
+# The minimum and maximum threads for handling requests when the native
+# transport is used. Their meaning is similar to that of rpc_min_threads
+# and rpc_max_threads, though the defaults differ slightly and are the
+# ones below:
+# native_transport_min_threads: 16
+# native_transport_max_threads: 128
+
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+# 
+# Leaving this blank has the same effect it does for ListenAddress
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: localhost
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three out-of-the-box options for the RPC Server:
+#
+# sync  -> One thread per thrift connection. For a very large number of clients, memory
+#          will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
+#          per thread, and that will correspond to your use of virtual memory (physical
+#          memory use depends on how much of that stack space is actually touched).
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#          asynchronously using a small number of threads that does not vary with the number
+#          of thrift clients (and thus scales well to many clients). The rpc requests are still
+#          synchronous (one thread per active request).
+#
+# The default is sync because on Windows hsha is about 30% slower.  On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_threads to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
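+# Hedged note: keep thrift_max_message_length_in_mb larger than
+# thrift_framed_transport_size_in_mb (16 > 15 here), since a message adds
+# thrift overhead on top of the framed payload.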
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data.  Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction.  Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you.  Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true 
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns.  The competing concerns: Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# it wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory.  Larger rows will spill
+# over to disk and use a slower two-pass compaction process.  A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair.  Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine, and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise, 
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
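+# A hedged worked example: if clients insert at roughly 1 MB/s sustained,
+# the 16-to-32x rule above suggests 16-32 MB/s, so the stock value of 16
+# is already sufficient.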
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable.  Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 10000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 10000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, Cassandra will assume the request
+# was forwarded to the replica instantly by the coordinator.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the clocks are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming a significant amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which means streams never time out.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch.  The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+#   requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+#   correlated failures. It does this by grouping machines into
+#   "datacenters" and "racks."  Cassandra will do its best not to have
+#   more than one replica on the same "rack" (which may not actually
+#   be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+#  - SimpleSnitch:
+#    Treats Strategy order as proximity. This improves cache locality
+#    when disabling read repair, which can further improve throughput.
+#    Only appropriate for single-datacenter deployments.
+#  - PropertyFileSnitch:
+#    Proximity is determined by rack and data center, which are
+#    explicitly configured in cassandra-topology.properties.
+#  - GossipingPropertyFileSnitch:
+#    The rack and datacenter for the local node are defined in
+#    cassandra-rackdc.properties and propagated to other nodes via gossip.  If
+#    cassandra-topology.properties exists, it is used as a fallback, allowing
+#    migration from the PropertyFileSnitch.
+#  - RackInferringSnitch:
+#    Proximity is determined by rack and data center, which are
+#    assumed to correspond to the 3rd and 2nd octet of each node's
+#    IP address, respectively.  Unless this happens to match your
+#    deployment conventions (as it did for Facebook), this is best used
+#    as an example of writing a custom Snitch class.
+#  - Ec2Snitch:
+#    Appropriate for EC2 deployments in a single Region.  Loads Region
+#    and Availability Zone information from the EC2 API. The Region is
+#    treated as the Datacenter, and the Availability Zone as the rack.
+#    Only private IPs are used, so this will not work across multiple
+#    Regions.
+#  - Ec2MultiRegionSnitch:
+#    Uses public IPs as broadcast_address to allow cross-region
+#    connectivity.  (Thus, you should set seed addresses to the public
+#    IP as well.) You will need to open the storage_port or
+#    ssl_storage_port on the public IP firewall.  (For intra-Region
+#    traffic, Cassandra will switch to the private IP after
+#    establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
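+# A hedged sketch of the companion cassandra-topology.properties used by
+# PropertyFileSnitch (hypothetical addresses, datacenter and rack names):
+#   192.168.10.11=DC1:RAC1
+#   192.168.10.12=DC1:RAC2
+#   default=DC1:RAC1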
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100 
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it.  This is
+# expressed as a double which represents a percentage.  Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+#  - throttle_limit -- The throttle_limit is the number of in-flight
+#                      requests per client.  Requests beyond 
+#                      that limit are queued up until
+#                      running requests can complete.
+#                      The value of 80 here is meant to be twice
+#                      concurrent_reads + concurrent_writes; note that with
+#                      the 32/32 configured above that would be 128.
+#  - default_weight -- default_weight is optional and allows for
+#                      overriding the default which is 1.
+#  - weights -- Weights are optional and will default to 1 or the
+#               overridden default_weight. The weight translates into how
+#               many requests are handled during each turn of the
+#               RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+#    throttle_limit: 80
+#    default_weight: 5
+#    weights:
+#      Keyspace1: 1
+#      Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time.  The larger the interval,
+# the smaller and less effective the sampling will be.  In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample.  All the sampled entries
+# must fit in memory.  Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best
+# trade-offs.  This value is not often changed; however, if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption.
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys), and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment.
+# The available internode options are: all, none, dc, rack
+#
+# If set to dc, Cassandra will encrypt the traffic between the DCs.
+# If set to rack, Cassandra will encrypt the traffic between the racks.
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore.  For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
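+    # A hedged sketch of generating the key and trust material with the
+    # JDK keytool (alias, validity and passwords are illustrative):
+    #   keytool -genkeypair -keyalg RSA -alias node1 -validity 365 \
+    #           -keystore conf/.keystore -storepass cassandra
+    #   keytool -export -alias node1 -file node1.cer -keystore conf/.keystore
+    #   keytool -import -trustcacerts -alias node1 -file node1.cer \
+    #           -keystore conf/.truststore -storepass cassandra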
+
+# enable or disable client/server encryption.
+client_encryption_options:
+    enabled: false
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be:  all  - all traffic is compressed
+#          dc   - traffic between different datacenters is compressed
+#          none - nothing is compressed.
+internode_compression: all
diff --git a/conf/onos-embedded.properties b/conf/onos-embedded.properties
new file mode 100644
index 0000000..50c36f9
--- /dev/null
+++ b/conf/onos-embedded.properties
@@ -0,0 +1,18 @@
+floodlight.modules = net.floodlightcontroller.storage.memory.MemoryStorageSource,\
+net.floodlightcontroller.core.FloodlightProvider,\
+net.floodlightcontroller.threadpool.ThreadPool,\
+net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl,\
+net.floodlightcontroller.staticflowentry.StaticFlowEntryPusher,\
+net.floodlightcontroller.counter.CounterStore,\
+net.floodlightcontroller.perfmon.PktInProcessingTime,\
+net.floodlightcontroller.ui.web.StaticWebRoutable,\
+net.floodlightcontroller.onoslistener.OnosPublisher,\
+net.onrc.onos.registry.controller.ZookeeperRegistry
+net.floodlightcontroller.restserver.RestApiServer.port = 8080
+net.floodlightcontroller.core.FloodlightProvider.openflowport = 6633
+net.floodlightcontroller.core.FloodlightProvider.workerthreads = 48
+net.floodlightcontroller.jython.JythonDebugInterface.port = 6655
+net.floodlightcontroller.forwarding.Forwarding.idletimeout = 5
+net.floodlightcontroller.forwarding.Forwarding.hardtimeout = 0
+net.floodlightcontroller.onoslistener.OnosPublisher.dbconf = conf/titan-embedded.properties
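+# dbconf points OnosPublisher at the graph-database settings; the file above
+# selects Titan's embedded Cassandra backend (see conf/titan-embedded.properties).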
+
diff --git a/conf/titan-embedded.properties b/conf/titan-embedded.properties
new file mode 100644
index 0000000..7a28afb
--- /dev/null
+++ b/conf/titan-embedded.properties
@@ -0,0 +1,6 @@
+storage.backend=embeddedcassandra
+storage.cassandra-config-dir=file:conf/cassandra.yaml
+storage.keyspace=onos
+storage.replication-factor=1
+storage.write-consistency-level=ALL
+storage.read-consistency-level=ONE
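+# A hedged alternative (assuming a separately managed Cassandra instance):
+# point Titan at an external cluster instead of the embedded backend, e.g.
+# storage.backend=cassandra
+# storage.hostname=127.0.0.1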
diff --git a/src/main/java/net/floodlightcontroller/core/INetMapTopologyObjects.java b/src/main/java/net/floodlightcontroller/core/INetMapTopologyObjects.java
index 4a03327..3d5b907 100644
--- a/src/main/java/net/floodlightcontroller/core/INetMapTopologyObjects.java
+++ b/src/main/java/net/floodlightcontroller/core/INetMapTopologyObjects.java
@@ -1,5 +1,6 @@
 package net.floodlightcontroller.core;
 
+import net.floodlightcontroller.core.INetMapTopologyObjects.IPortObject;
 import net.floodlightcontroller.flowcache.web.DatapathSummarySerializer;
 
 import org.codehaus.jackson.annotate.JsonIgnore;
@@ -11,6 +12,7 @@
 import com.tinkerpop.frames.Incidence;
 import com.tinkerpop.frames.Property;
 import com.tinkerpop.frames.annotations.gremlin.GremlinGroovy;
+import com.tinkerpop.frames.annotations.gremlin.GremlinParam;
 import com.tinkerpop.frames.VertexFrame;
 
 public interface INetMapTopologyObjects {
@@ -46,9 +48,9 @@
 		public Iterable<IPortObject> getPorts();
 
 // Requires Frames 2.3.0		
-//		@JsonIgnore
-//		@GremlinGroovy("_().out('on').has('number',port_num)")
-//		public IPortObject getPort(@GremlinParam("port_num") final short port_num);
+		@JsonIgnore
+		@GremlinGroovy("_().out('on').has('number',port_num)")
+		public IPortObject getPort(@GremlinParam("port_num") final short port_num);
 		
 		@Adjacency(label="on")
 		public void addPort(final IPortObject port);
@@ -63,6 +65,7 @@
 		@JsonIgnore
 		@Incidence(label="switch",direction = Direction.IN)
 		public Iterable<IFlowEntry> getFlowEntries();
+
 	}
 	
 	public interface IPortObject extends IBaseObject{
diff --git a/src/main/java/net/floodlightcontroller/core/internal/Controller.java b/src/main/java/net/floodlightcontroller/core/internal/Controller.java
index 842ef35..dc421a4 100644
--- a/src/main/java/net/floodlightcontroller/core/internal/Controller.java
+++ b/src/main/java/net/floodlightcontroller/core/internal/Controller.java
@@ -2234,13 +2234,6 @@
         //Set the controller's role to MASTER so it always tries to do role requests.
         this.role = Role.MASTER;
         this.roleChanger = new RoleChanger();
-        
-		String conf = configParams.get("dbconf");
-		if (conf == null || conf.isEmpty()) {
-			conf = "/tmp/cassandra.titan";
-			log.debug("did not get DB config setting using default {}", conf);
-		}
-		log.debug("setting DB config {}", conf);
 		
         initVendorMessages();
         this.systemStartTime = System.currentTimeMillis();
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java b/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java
index 1efe015..1df7eb5 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkDiscovery.java
@@ -8,6 +8,7 @@
 
     @JsonSerialize(using=ToStringSerializer.class)
     public enum UpdateOperation {
+        LINK_ADDED("Link Added"),
         LINK_UPDATED("Link Updated"),
         LINK_REMOVED("Link Removed"),
         SWITCH_UPDATED("Switch Updated"),
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkStorage.java b/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkStorage.java
index eb2fac9..90836ca 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkStorage.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/ILinkStorage.java
@@ -17,7 +17,7 @@
 	/*
 	 *  Add Linkinfo
 	 */
-	public void addOrUpdateLink (Link link, LinkInfo linkinfo, DM_OPERATION op);
+	public void updateLink (Link link, LinkInfo linkinfo, DM_OPERATION op);
 	
 	/*
 	 * Delete a single link
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
index 72b2988..ac44eeb 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkDiscoveryManager.java
@@ -44,7 +44,6 @@
 import net.floodlightcontroller.core.IFloodlightProviderService.Role;
 import net.floodlightcontroller.core.IHAListener;
 import net.floodlightcontroller.core.IInfoProvider;
-import net.floodlightcontroller.core.INetMapStorage.DM_OPERATION;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.IOFSwitchListener;
@@ -197,20 +196,6 @@
     protected LLDPTLV controllerTLV;
     protected ReentrantReadWriteLock lock;
     int lldpTimeCount = 0;
-
-    // Storage
-
-    ThreadLocal<LinkStorageImpl> store = new ThreadLocal<LinkStorageImpl>() {
-		@Override
-		protected LinkStorageImpl initialValue() {
-			LinkStorageImpl swStore = new LinkStorageImpl();
-			//TODO: Get the file path from global properties
-			swStore.init("/tmp/cassandra.titan");
-			return swStore;
-		}
-	};
-    protected LinkStorageImpl linkStore = store.get();
-   // protected SwitchStorageImpl swStore;
     
     /**
      * Flag to indicate if automatic port fast is enabled or not.
@@ -1076,11 +1061,7 @@
 
                 writeLinkToStorage(lt, newInfo);
                 
-                // Write link to network map
-                operation = NetworkMapOperation.INSERT;
-                //linkStore.update(lt, newInfo, DM_OPERATION.INSERT);
-                
-                updateOperation = UpdateOperation.LINK_UPDATED;
+                updateOperation = UpdateOperation.LINK_ADDED;
                 linkChanged = true;
 
                 // Add to event history
@@ -1135,10 +1116,6 @@
                 // valid time, plus the port states if they've changed (i.e. if
                 // they weren't set to null in the previous block of code.
                 writeLinkToStorage(lt, newInfo);
-
-                // Write link to network map
-                operation = NetworkMapOperation.UPDATE;
-                //linkStore.update(lt, newInfo, DM_OPERATION.UPDATE);
                 
                 if (linkChanged) {
                     updateOperation = getUpdateOperation(newInfo.getSrcPortState(),
@@ -1169,17 +1146,6 @@
             lock.writeLock().unlock();
         }
         
-        switch (operation){
-        case INSERT:
-        	linkStore.update(lt, newInfo, DM_OPERATION.INSERT);
-        	break;
-        case UPDATE:
-        	linkStore.update(lt, newInfo, DM_OPERATION.UPDATE);
-        	break;
-        case NONE:
-        default:
-        	break;
-        }
 
         return linkChanged;
     }
@@ -1246,10 +1212,6 @@
 
                 // remove link from storage.
                 removeLinkFromStorage(lt);
-
-                // remote link from network map
-                linkStore.update(lt, DM_OPERATION.DELETE);
-
                 
                 // TODO  Whenever link is removed, it has to checked if
                 // the switchports must be added to quarantine.
@@ -1301,7 +1263,7 @@
                     ((byte)OFPortReason.OFPPR_MODIFY.ordinal() ==
                     ps.getReason() && !portEnabled(ps.getDesc()))) {
                 deleteLinksOnPort(npt, "Port Status Changed");
-                //swStore.deletePort(HexString.toHexString(npt.getNodeId()), npt.getPortId());
+                
                 LDUpdate update = new LDUpdate(sw, port, UpdateOperation.PORT_DOWN);
                 updates.add(update);
                 linkDeleted = true;
@@ -1345,9 +1307,6 @@
                                                      operation));
                             writeLinkToStorage(lt, linkInfo);
                             
-                            // Write the changed link to the network map
-                            linkStore.update(lt,  linkInfo, DM_OPERATION.UPDATE);
-                            
                             linkInfoChanged = true;
                         }
                     }
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkStorageImpl.java b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkStorageImpl.java
index d62d65b..c8ca86f 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkStorageImpl.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/LinkStorageImpl.java
@@ -5,8 +5,6 @@
 
 import net.floodlightcontroller.core.INetMapTopologyObjects.IPortObject;
 import net.floodlightcontroller.core.INetMapTopologyObjects.ISwitchObject;
-import net.floodlightcontroller.core.INetMapTopologyService.ITopoSwitchService;
-import net.floodlightcontroller.core.internal.TopoSwitchServiceImpl;
 import net.floodlightcontroller.linkdiscovery.ILinkStorage;
 import net.floodlightcontroller.linkdiscovery.LinkInfo;
 import net.floodlightcontroller.routing.Link;
@@ -48,7 +46,7 @@
 		case UPDATE:
 		case CREATE:
 		case INSERT:
-			addOrUpdateLink(link, linkinfo, op);
+			updateLink(link, linkinfo, op);
 			break;
 		case DELETE:
 			deleteLink(link);
@@ -56,11 +54,11 @@
 		}
 	}
 	
-	public void addOrUpdateLink(Link lt, LinkInfo linkinfo, DM_OPERATION op) {
+	public void updateLink(Link lt, LinkInfo linkinfo, DM_OPERATION op) {
 		GraphDBConnection conn = GraphDBConnection.getInstance(this.conf);
 		IPortObject vportSrc = null, vportDst = null;
 	
-		log.trace("addOrUpdateLink(): op {} {} {}", new Object[]{op, lt, linkinfo});
+		log.trace("updateLink(): op {} {} {}", new Object[]{op, lt, linkinfo});
 		
         try {
             // get source port vertex
@@ -93,18 +91,18 @@
             		vportSrc.setLinkPort(vportDst);
 
             		conn.endTx(Transaction.COMMIT);
-            		log.debug("addOrUpdateLink(): link added {} {} src {} dst {}", new Object[]{op, lt, vportSrc, vportDst});
+            		log.debug("updateLink(): link added {} {} src {} dst {}", new Object[]{op, lt, vportSrc, vportDst});
             	}
             } else {
-            	log.error("addOrUpdateLink(): failed invalid vertices {} {} src {} dst {}", new Object[]{op, lt, vportSrc, vportDst});
- //           	conn.endTx(Transaction.ROLLBACK);
+            	log.error("updateLink(): failed invalid vertices {} {} src {} dst {}", new Object[]{op, lt, vportSrc, vportDst});
+            	conn.endTx(Transaction.ROLLBACK);
             }
         } catch (TitanException e) {
             /*
              * retry till we succeed?
              */
         	e.printStackTrace();
-        	log.error("addOrUpdateLink(): titan exception {} {} {}", new Object[]{op, lt, e.toString()});
+        	log.error("updateLink(): titan exception {} {} {}", new Object[]{op, lt, e.toString()});
         }
 	}
 	
@@ -154,7 +152,7 @@
             	
             } else {
             	log.error("deleteLink(): failed invalid vertices {} src {} dst {}", new Object[]{lt, vportSrc, vportDst});
-//            	conn.endTx(Transaction.ROLLBACK);
+            	conn.endTx(Transaction.ROLLBACK);
             }
          	
         } catch (TitanException e) {
@@ -162,6 +160,7 @@
              * retry till we succeed?
              */
         	log.error("deleteLink(): titan exception {} {}", new Object[]{lt, e.toString()});
+        	conn.endTx(Transaction.ROLLBACK);
         	e.printStackTrace();
         }
 	}
diff --git a/src/main/java/net/floodlightcontroller/linkdiscovery/internal/TopoLinkServiceImpl.java b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/TopoLinkServiceImpl.java
index 1bd6421..9d5fdba 100644
--- a/src/main/java/net/floodlightcontroller/linkdiscovery/internal/TopoLinkServiceImpl.java
+++ b/src/main/java/net/floodlightcontroller/linkdiscovery/internal/TopoLinkServiceImpl.java
@@ -8,6 +8,7 @@
 import net.floodlightcontroller.linkdiscovery.internal.LinkStorageImpl.ExtractLink;
 import net.floodlightcontroller.routing.Link;
 import net.onrc.onos.util.GraphDBConnection;
+import net.onrc.onos.util.GraphDBConnection.Transaction;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -51,6 +52,7 @@
 			}
 						
 		}
+		conn.endTx(Transaction.COMMIT);
 		return links;
 	}
 
diff --git a/src/main/java/net/floodlightcontroller/onoslistener/OnosPublisher.java b/src/main/java/net/floodlightcontroller/onoslistener/OnosPublisher.java
index d35a4f8..19ed705 100644
--- a/src/main/java/net/floodlightcontroller/onoslistener/OnosPublisher.java
+++ b/src/main/java/net/floodlightcontroller/onoslistener/OnosPublisher.java
@@ -32,6 +32,10 @@
 import net.floodlightcontroller.devicemanager.IDeviceStorage;
 import net.floodlightcontroller.devicemanager.internal.DeviceStorageImpl;
 import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryListener;
+import net.floodlightcontroller.linkdiscovery.ILinkDiscoveryService;
+import net.floodlightcontroller.linkdiscovery.ILinkStorage;
+import net.floodlightcontroller.linkdiscovery.internal.LinkStorageImpl;
+import net.floodlightcontroller.routing.Link;
 import net.floodlightcontroller.threadpool.IThreadPoolService;
 import net.onrc.onos.registry.controller.IControllerRegistryService;
 import net.onrc.onos.registry.controller.IControllerRegistryService.ControlChangeCallback;
@@ -44,6 +48,7 @@
 	
 	protected IDeviceStorage devStore;
 	protected ISwitchStorage swStore;
+	protected ILinkStorage linkStore;
 	protected static Logger log;
 	protected IDeviceService deviceService;
 	protected IControllerRegistryService registryService;
@@ -56,6 +61,7 @@
 	
 	protected final int CLEANUP_TASK_INTERVAL = 60; // 1 min
 	protected SingletonTask cleanupTask;
+	protected ILinkDiscoveryService linkDiscovery;
 	
 	/**
      *  Cleanup and synch switch state from registry
@@ -119,13 +125,22 @@
 	@Override
 	public void linkDiscoveryUpdate(LDUpdate update) {
 		// TODO Auto-generated method stub
+		Link lt = new Link(update.getSrc(), update.getSrcPort(), update.getDst(), update.getDstPort());
+		log.debug("{}:linkDiscoveryUpdate(): Updating Link {}", this.getClass(), lt);
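+		// Mirror the discovery event into the network map:
+		// LINK_ADDED -> INSERT, LINK_UPDATED -> UPDATE, LINK_REMOVED -> DELETE.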
 		switch (update.getOperation()) {
 		
 			case LINK_REMOVED:
+				linkStore.update(lt, DM_OPERATION.DELETE);
 				// TODO: Move network map link removal here
 				// reconcile paths here
 //				IPortObject srcPort = conn.utils().searchPort(conn, HexString.toHexString(update.getSrc()), update.getSrcPort());
 				break;
+			case LINK_UPDATED:
+				linkStore.update(lt, DM_OPERATION.UPDATE);
+				break;
+			case LINK_ADDED:
+				linkStore.update(lt, DM_OPERATION.INSERT);
+				break;
 					
 			default:
 				break;
@@ -242,6 +257,7 @@
 		floodlightProvider =
 	            context.getServiceImpl(IFloodlightProviderService.class);
 		deviceService = context.getServiceImpl(IDeviceService.class);
+		linkDiscovery = context.getServiceImpl(ILinkDiscoveryService.class);
 		threadPool = context.getServiceImpl(IThreadPoolService.class);
 		registryService = context.getServiceImpl(IControllerRegistryService.class);
 		
@@ -250,6 +266,9 @@
 		
 		swStore = new SwitchStorageImpl();
 		swStore.init(conf);
+		
+		linkStore = new LinkStorageImpl();
+		linkStore.init(conf);
 				
 		log.debug("Initializing OnosPublisher module with {}", conf);
 		
@@ -263,6 +282,7 @@
 
 		deviceService.addListener(this);
 		floodlightProvider.addOFSwitchListener(this);
+		linkDiscovery.addListener(this);
 		
 		log.debug("Adding EventListener");
 		conn.addEventListener(new LocalTopologyEventListener(conn));
diff --git a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
index ba17483..958fc84 100644
--- a/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
+++ b/src/main/java/net/floodlightcontroller/topology/TopologyManager.java
@@ -17,6 +17,7 @@
 import net.floodlightcontroller.core.FloodlightContext;
 import net.floodlightcontroller.core.IFloodlightProviderService;
 import net.floodlightcontroller.core.IFloodlightProviderService.Role;
+import net.floodlightcontroller.core.INetMapStorage.DM_OPERATION;
 import net.floodlightcontroller.core.IOFMessageListener;
 import net.floodlightcontroller.core.IOFSwitch;
 import net.floodlightcontroller.core.IHAListener;
@@ -871,7 +872,8 @@
             if (log.isTraceEnabled()) {
                 log.trace("Applying update: {}", update);
             }
-            if (update.getOperation() == UpdateOperation.LINK_UPDATED) {
+            if (update.getOperation() == UpdateOperation.LINK_UPDATED ||
+                update.getOperation() == UpdateOperation.LINK_ADDED) {
                 addOrUpdateLink(update.getSrc(), update.getSrcPort(),
                                 update.getDst(), update.getDstPort(),
                                 update.getType());
diff --git a/src/main/java/net/onrc/onos/util/GraphDBUtils.java b/src/main/java/net/onrc/onos/util/GraphDBUtils.java
index 6601bf9..2522765 100644
--- a/src/main/java/net/onrc/onos/util/GraphDBUtils.java
+++ b/src/main/java/net/onrc/onos/util/GraphDBUtils.java
@@ -55,29 +55,29 @@
 	@Override
 	public IPortObject searchPort(GraphDBConnection conn, String dpid, short number) {
 		ISwitchObject sw = searchSwitch(conn, dpid);
-//		if (sw != null) {
-//			
-//			IPortObject port = null;
-//			
-			// Requires Frames 2.3.0
-//			
-//			try {
-//				port = sw.getPort(number);
-//			} catch (Exception e) {
-//				// TODO Auto-generated catch block
-//				e.printStackTrace();
-//			}
-//			
-//			return port;
-//		}
-		
 		if (sw != null) {
-			GremlinPipeline<Vertex, IPortObject> pipe = new GremlinPipeline<Vertex, IPortObject>();
-			pipe.start(sw.asVertex());
-			pipe.out("on").has("number", number);
-			FramedVertexIterable<IPortObject> r = new FramedVertexIterable<IPortObject>(conn.getFramedGraph(), (Iterable) pipe, IPortObject.class);
-			return r != null && r.iterator().hasNext() ? r.iterator().next() : null;
+			
+			IPortObject port = null;
+			
+			// Requires Frames 2.3.0
+			
+			try {
+				port = sw.getPort(number);
+			} catch (Exception e) {
+				// If the Frames getPort() lookup fails, log it and fall through to return null
+				e.printStackTrace();
+			}
+			
+			return port;
 		}
+		
+//		if (sw != null) {
+//			GremlinPipeline<Vertex, IPortObject> pipe = new GremlinPipeline<Vertex, IPortObject>();
+//			pipe.start(sw.asVertex());
+//			pipe.out("on").has("number", number);
+//			FramedVertexIterable<IPortObject> r = new FramedVertexIterable<IPortObject>(conn.getFramedGraph(), (Iterable) pipe, IPortObject.class);
+//			return r != null && r.iterator().hasNext() ? r.iterator().next() : null;
+//		}
 		return null;
 	}
 
diff --git a/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java b/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java
index a6948c0..3dfe5fe 100644
--- a/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java
+++ b/src/test/java/net/floodlightcontroller/core/internal/ControllerTest.java
@@ -545,6 +545,16 @@
                 nPortChanged++;
                 notifyAll();
             }
+			@Override
+			public void switchPortAdded(Long switchId, OFPhysicalPort port) {
+				// No-op: port-added events are not exercised by this test
+				
+			}
+			@Override
+			public void switchPortRemoved(Long switchId, OFPhysicalPort port) {
+				// No-op: port-removed events are not exercised by this test
+				
+			}
         }
         DummySwitchListener switchListener = new DummySwitchListener();
         IOFSwitch sw = createMock(IOFSwitch.class);