Merge pull request #5 from floodlight/master

merge
diff --git a/.abat-automerge b/.abat-automerge
new file mode 100755
index 0000000..772134b
--- /dev/null
+++ b/.abat-automerge
@@ -0,0 +1,10 @@
+#!/bin/bash -eux
+ROOTDIR=$(dirname $(readlink -f $0))
+cd "$ROOTDIR"
+
+ln -sf ../../.hooks/pre-commit .git/hooks/pre-commit
+make all check-all
+
+if [[ ${ARTIFACT_REPO_URL-} ]]; then
+    ./.build/push-artifacts.sh ${ARTIFACT_REPO_URL}
+fi
diff --git a/.build/push-artifacts.sh b/.build/push-artifacts.sh
new file mode 100755
index 0000000..8c7b60b
--- /dev/null
+++ b/.build/push-artifacts.sh
@@ -0,0 +1,61 @@
+#!/bin/bash -eu
+
+# Push the loxigen artifacts to a dedicated git repository,
+# along with a nice commit message and a tag
+
+ARTIFACT_REPO_URL="$1"
+if [[ ! $ARTIFACT_REPO_URL ]]; then
+    echo "Call syntax: $0 <artifact_repo_url>" >&2
+    exit 1
+fi
+
+ARTIFACT_REPO=$(mktemp -d --tmpdir "push-artifacts-repo.XXXXXXX")
+
+git clone ${ARTIFACT_REPO_URL} ${ARTIFACT_REPO}
+find ${ARTIFACT_REPO} -mindepth 1 -maxdepth 1 -type d \! -name '.*' -print0 | xargs -0 --no-run-if-empty rm -r
+make LOXI_OUTPUT_DIR=${ARTIFACT_REPO} clean all
+
+loxi_head=$(git rev-parse HEAD)
+last_loxi_log=$(git log --format=oneline -1)
+git_log_file=$(mktemp --tmpdir "git-log-file.XXXXXXX")
+
+last_loxi_revision=""
+
+if [[ -e "${ARTIFACT_REPO}/loxi-revision" ]]; then
+    last_loxi_revision=$(cut -d ' ' -f 1 "${ARTIFACT_REPO}/loxi-revision")
+    if [[ $(git cat-file -t "$last_loxi_revision" 2>/dev/null) != "commit" ]]; then
+        echo "Last loxi revision ${last_loxi_revision} specified in ${ARTIFACT_REPO_URL}/loxi-revision not found in loxigen repo"
+        last_loxi_revision=""
+    fi
+fi
+
+if [[ $last_loxi_revision ]]; then
+    echo "Last loxi revision committed: $last_loxi_revision"
+    git log $last_loxi_revision..${loxi_head} >>$git_log_file
+    loxi_github_url="https://github.com/floodlight/loxigen/compare/${last_loxi_revision}...${loxi_head}"
+else
+    echo "No Previous loxi revision info found"
+    git log -1 HEAD >>$git_log_file
+    loxi_github_url="https://github.com/floodlight/loxigen/commit/${loxi_head}"
+fi
+
+
+(
+    set -xe
+    cd $ARTIFACT_REPO
+    echo $last_loxi_log >loxi-revision
+    git add -A
+
+    (
+       echo "Artifacts from ${loxi_github_url}"
+       echo
+       echo "Loxigen Head commit floodlight/loxigen@${loxi_head}"
+       cat $git_log_file
+    ) | git commit --file=-
+
+    git tag -a -f "loxi/${loxi_head}" -m "Tag Loxigen Revision ${loxi_head}"
+    git push --tags
+    git push
+)
+
+rm -rf ${ARTIFACT_REPO}
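
For reference, a minimal manual invocation of the script above might look like the sketch below. The artifact repository URL is a placeholder; in practice it comes from the `ARTIFACT_REPO_URL` environment variable consumed by `.abat-automerge`.

```
# Hypothetical artifact repository URL -- substitute the real one used by the build.
ARTIFACT_REPO_URL="git@example.com:floodlight/loxigen-artifacts.git"
./.build/push-artifacts.sh "${ARTIFACT_REPO_URL}"
```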
diff --git a/.gitignore b/.gitignore
index 8b36dd4..c0b10be 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,9 @@
 .loxi_gen_files
 loxi_output
 loxigen.log
+tags
+.*.swp
+.*.swo
+*.cache
+openflowj-loxi
+/bin
diff --git a/.hooks/pre-commit b/.hooks/pre-commit
new file mode 100755
index 0000000..3f0aa09
--- /dev/null
+++ b/.hooks/pre-commit
@@ -0,0 +1,94 @@
+#!/bin/bash -e
+
+echo "Running ${PWD}/$0"
+
+# Create a script that diffs a staged file vs. the original, and looks for illegal
+# tabs in new or modified lines
+script=$(mktemp /tmp/check.XXXXXX)
+trap "rm -f ${script}" EXIT
+cat >${script} <<"EOF"
+#!/bin/bash
+diff --old-line-format= --unchanged-line-format= "$1" "$2" | grep -q $'\t' || exit
+fn=$(basename "$2")
+[ "$2" != "${2%%.java}" ] || [ "$2" != "${2%%.py}" ] || exit
+echo "$2"
+EOF
+chmod +x ${script}
+
+# Run the script on all staged files
+badfiles=$(git difftool --staged -y -x ${script})
+
+if [ "${badfiles}" ]; then
+  echo "New or modified lines in the following files contain tab characters:"
+  echo "${badfiles}" | sed "s/^/  /"
+  echo "Please correct these problems and retry the commit."
+  exit 1
+fi
+
+if git rev-parse --verify HEAD >/dev/null 2>&1
+then
+	against=HEAD
+else
+	# Initial commit: diff against an empty tree object
+	against=4b825dc642cb6eb9a060e54bf8d69288fbee4904
+fi
+
+set +e
+# If you want to allow non-ascii filenames set this variable to true.
+allownonascii=$(git config hooks.allownonascii)
+# Redirect output to stderr.
+exec 1>&2
+
+# Cross platform projects tend to avoid non-ascii filenames; prevent
+# them from being added to the repository. We exploit the fact that the
+# printable range starts at the space character and ends with tilde.
+if [ "$allownonascii" != "true" ] &&
+	# Note that the use of brackets around a tr range is ok here, (it's
+	# even required, for portability to Solaris 10's /usr/bin/tr), since
+	# the square bracket bytes happen to fall in the designated range.
+	test $(git diff --cached --name-only --diff-filter=A -z $against |
+	  LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0
+then
+	echo "Error: Attempt to add a non-ascii file name."
+	echo
+	echo "This can cause problems if you want to work"
+	echo "with people on other platforms."
+	echo
+	echo "To be portable it is advisable to rename the file ..."
+	echo
+	echo "If you know what you are doing you can disable this"
+	echo "check using:"
+	echo
+	echo "  git config hooks.allownonascii true"
+	echo
+	exit 1
+fi
+
+cfg_check_whitespace=$(git config hooks.checkwhitespace)
+
+if [ -e .git/MERGE_MSG ]; then
+    # this is a merge commit - default to no check
+    if [[ $cfg_check_whitespace == "true" ]]; then
+        check_whitespace=true
+    else
+        check_whitespace=false
+    fi
+else
+    # this is not a merge commit - default to check
+    if [[ $cfg_check_whitespace != "false" ]]; then
+        check_whitespace=true
+    else
+        check_whitespace=false
+    fi
+fi
+
+if ! git diff --cached --name-only | grep -q -E '\.(java|c|h|yaml|yang|json|sh|xsl|py)$'; then
+    echo "No source files found in commit - skipping whitespace check"
+    check_whitespace=false
+fi
+
+if [[ $check_whitespace == "true" ]]; then
+    # If there are whitespace errors, print the offending file names and fail.
+    rv=0
+    exec git diff-index --check --cached $against
+fi
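
The hook above only takes effect once it is linked into `.git/hooks`. A sketch of the one-time setup (the same symlink `.abat-automerge` creates) and of the optional `git config` knobs the hook reads is shown below:

```
# Run from the repository root.
ln -sf ../../.hooks/pre-commit .git/hooks/pre-commit

# Optional settings read by the hook:
git config hooks.allownonascii true    # allow non-ascii file names
git config hooks.checkwhitespace true  # also run the whitespace check on merge commits
```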
diff --git a/Makefile b/Makefile
index 6b63fa6..3767fba 100644
--- a/Makefile
+++ b/Makefile
@@ -34,31 +34,82 @@
 LOXI_OUTPUT_DIR = loxi_output
 
 # Generated files depend on all Loxi code and input files
-LOXI_PY_FILES=$(shell find \( -name loxi_output -prune \
+LOXI_PY_FILES=$(shell find . \( -name loxi_output -prune \
                              -o -name templates -prune \
-                             -o -true \
+                             -o -name tests -prune \
+                             -o -name '*' \
                            \) -a -name '*.py')
 LOXI_TEMPLATE_FILES=$(shell find */templates -type f -a \
                                  \! \( -name '*.cache' -o -name '.*' \))
 INPUT_FILES = $(wildcard openflow_input/*)
+TEST_DATA = $(shell find test_data -name '*.data')
+OPENFLOWJ_OUTPUT_DIR = ${LOXI_OUTPUT_DIR}/openflowj
+OPENFLOWJ_ECLIPSE_WORKSPACE = openflowj-loxi
 
-all: c python
+all: c python java wireshark
 
 c: .loxi_ts.c
 
-.loxi_ts.c: ${LOXI_PY_FILES} ${LOXI_TEMPLATE_FILES} ${INPUT_FILES}
+.loxi_ts.c: ${LOXI_PY_FILES} ${LOXI_TEMPLATE_FILES} ${INPUT_FILES} ${TEST_DATA}
 	./loxigen.py --install-dir=${LOXI_OUTPUT_DIR} --lang=c
 	touch $@
 
 python: .loxi_ts.python
 
-.loxi_ts.python: ${LOXI_PY_FILES} ${LOXI_TEMPLATE_FILES} ${INPUT_FILES}
+.loxi_ts.python: ${LOXI_PY_FILES} ${LOXI_TEMPLATE_FILES} ${INPUT_FILES} ${TEST_DATA}
 	./loxigen.py --install-dir=${LOXI_OUTPUT_DIR} --lang=python
 	touch $@
 
+python-doc: python
+	rm -rf ${LOXI_OUTPUT_DIR}/pyloxi-doc
+	mkdir -p ${LOXI_OUTPUT_DIR}/pyloxi-doc
+	cp -a py_gen/sphinx ${LOXI_OUTPUT_DIR}/pyloxi-doc/input
+	PYTHONPATH=${LOXI_OUTPUT_DIR}/pyloxi sphinx-apidoc -o ${LOXI_OUTPUT_DIR}/pyloxi-doc/input ${LOXI_OUTPUT_DIR}/pyloxi
+	sphinx-build ${LOXI_OUTPUT_DIR}/pyloxi-doc/input ${LOXI_OUTPUT_DIR}/pyloxi-doc
+	rm -rf ${LOXI_OUTPUT_DIR}/pyloxi-doc/input
+	@echo "HTML documentation output to ${LOXI_OUTPUT_DIR}/pyloxi-doc"
+
+java: .loxi_ts.java
+	@rsync -rt java_gen/pre-written/ ${LOXI_OUTPUT_DIR}/openflowj/
+	@if [ -e ${OPENFLOWJ_ECLIPSE_WORKSPACE} ]; then \
+		rsync --checksum --delete -rv ${LOXI_OUTPUT_DIR}/openflowj/gen-src/ ${OPENFLOWJ_ECLIPSE_WORKSPACE}/gen-src; \
+	fi
+
+.loxi_ts.java: ${LOXI_PY_FILES} ${LOXI_TEMPLATE_FILES} ${INPUT_FILES} ${TEST_DATA}
+	./loxigen.py --install-dir=${LOXI_OUTPUT_DIR} --lang=java
+	touch $@
+
+eclipse-workspace:
+	mkdir -p ${OPENFLOWJ_ECLIPSE_WORKSPACE}
+	ln -sf ../java_gen/pre-written/pom.xml ${OPENFLOWJ_ECLIPSE_WORKSPACE}/pom.xml
+	ln -sf ../java_gen/pre-written/LICENSE.txt ${OPENFLOWJ_ECLIPSE_WORKSPACE}/LICENSE.txt
+	ln -sf ../java_gen/pre-written/src ${OPENFLOWJ_ECLIPSE_WORKSPACE}
+	rsync --checksum --delete -rv ${LOXI_OUTPUT_DIR}/openflowj/gen-src/ ${OPENFLOWJ_ECLIPSE_WORKSPACE}/gen-src
+	cd ${OPENFLOWJ_ECLIPSE_WORKSPACE} && mvn eclipse:eclipse
+	# Unfortunately, mvn eclipse:eclipse resolves the symlink, which doesn't work with eclipse
+	cd ${OPENFLOWJ_ECLIPSE_WORKSPACE} && perl -pi -e 's{<classpathentry kind="src" path="[^"]*/java_gen/pre-written/src/}{<classpathentry kind="src" path="src/}' .classpath
+
+check-java: java
+	cd ${OPENFLOWJ_OUTPUT_DIR} && mvn compile test-compile test
+
+package-java: java
+	cd ${OPENFLOWJ_OUTPUT_DIR} && mvn package
+
+deploy-java: java
+	cd ${OPENFLOWJ_OUTPUT_DIR} && mvn deploy
+
+install-java: java
+	cd ${OPENFLOWJ_OUTPUT_DIR} && mvn install
+
+wireshark: .loxi_ts.wireshark
+
+.loxi_ts.wireshark: ${LOXI_PY_FILES} ${LOXI_TEMPLATE_FILES} ${INPUT_FILES}
+	./loxigen.py --install-dir=${LOXI_OUTPUT_DIR} --lang=wireshark
+	touch $@
+
 clean:
 	rm -rf loxi_output # only delete generated files in the default directory
-	rm -f loxigen.log loxigen-test.log .loxi_ts.c .loxi_ts.python
+	rm -f loxigen.log loxigen-test.log .loxi_ts.*
 
 debug:
 	@echo "LOXI_OUTPUT_DIR=\"${LOXI_OUTPUT_DIR}\""
@@ -69,18 +120,26 @@
 	@echo
 	@echo "INPUT_FILES=\"${INPUT_FILES}\""
 
+check-all: check check-c check-py check-java
+
 check:
-	PYTHONPATH=. ./utest/test_parser.py
+	nosetests
+
+check-py: python
+	PYTHONPATH=${LOXI_OUTPUT_DIR}/pyloxi:. python py_gen/tests/generic_util.py
+	PYTHONPATH=${LOXI_OUTPUT_DIR}/pyloxi:. python py_gen/tests/of10.py
+	PYTHONPATH=${LOXI_OUTPUT_DIR}/pyloxi:. python py_gen/tests/of11.py
+	PYTHONPATH=${LOXI_OUTPUT_DIR}/pyloxi:. python py_gen/tests/of12.py
+	PYTHONPATH=${LOXI_OUTPUT_DIR}/pyloxi:. python py_gen/tests/of13.py
+
+check-c: c
+	make -j4 -C ${LOXI_OUTPUT_DIR}/locitest
+	${LOXI_OUTPUT_DIR}/locitest/locitest
 
 pylint:
 	pylint -E ${LOXI_PY_FILES}
 
-.PHONY: all clean debug check pylint c python
+ctags:
+	ctags ${LOXI_PY_FILES} ${LOXI_TEMPLATE_FILES} ${INPUT_FILES} ${TEST_DATA}
 
-ifdef BIGCODE
-# Internal build system compatibility
-MODULE := LoxiGen
-LOXI_OUTPUT_DIR = ${BIGCODE}/Modules
-modulemake:
-.PHONY: modulemake
-endif
+.PHONY: all clean debug check check-all check-c check-py check-java pylint c python java wireshark python-doc eclipse-workspace package-java deploy-java install-java ctags
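
A few example invocations of the new targets, assuming the prerequisites are installed (Maven for the Java backend, nosetests for `make check`); the output directory shown is a hypothetical override:

```
make java                          # generate the OpenFlowJ sources
make check-all                     # unit tests plus the C, Python, and Java artifact tests
make LOXI_OUTPUT_DIR=/tmp/loxi c   # hypothetical output directory override
make eclipse-workspace             # set up the openflowj-loxi Eclipse project
```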
diff --git a/README.md b/README.md
index 63b91f3..1e70f9d 100644
--- a/README.md
+++ b/README.md
@@ -3,15 +3,39 @@
 
 LoxiGen is a tool that generates OpenFlow protocol libraries for a number of
 languages. It is composed of a frontend that parses wire protocol descriptions
-and a backend for each supported language (currently C and Python, with Java on
-the way).
+and a backend for each supported language (currently C, Python, and Java, with an
+auto-generated Wireshark dissector in Lua on the way).
 
+LoxiGen currently supports OpenFlow versions **1.0**, **1.1**, **1.2**, and **1.3.1**. Versions 1.0
+and 1.3.1 are actively used in production. Support for versions 1.1 and 1.2 is considered experimental.
+
+
+Prerequisites
+=============
+
+Running the unit tests requires [nosetests](http://nose.readthedocs.org/en/latest/).
+You can install it via easy_install,
+```
+easy_install nose
+```
+pip,
+```
+pip install nose
+```
+or via your distribution's package manager (example for Debian/Ubuntu):
+```
+sudo apt-get install python-nose
+```
+
+Nosetests is only required for running the unit tests, not for running LoxiGen
+itself. We do ask you to install it and use it before submitting pull requests,
+though.
 
 Usage
 =====
 
 You can run LoxiGen directly from the repository. There's no need to install it,
-and it has no dependencies beyond Python 2.6+.
+and it has no dependencies beyond Python 2.7+.
 
 To generate the libraries for all languages:
 
@@ -25,7 +49,9 @@
 make c
 ```
 
-The currently supported languages are `c` and `python`.
+The currently supported languages are `c`, `python`, and `java`. There is an
+experimental, partially completed backend that generates a Lua Wireshark dissector
+(`wireshark`).
 
 The generated libraries will be under the `loxi_output` directory. This can be
 changed with the `LOXI_OUTPUT_DIR` environment variable when using the Makefile.
@@ -39,3 +65,12 @@
 
 Please fork the repository on GitHub and send us a pull request. You might also
 be interested in the INTERNALS file which has notes about how LoxiGen works.
+
+LoxiGen comes with a set of internal unit tests, as well as a set of tests
+for the generated artifacts. Be sure to run
+
+```
+make check-all
+```
+
+and correct any problems before submitting a pull request.
diff --git a/c_gen/build_of_g.py b/c_gen/build_of_g.py
new file mode 100755
index 0000000..f117bfd
--- /dev/null
+++ b/c_gen/build_of_g.py
@@ -0,0 +1,535 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import sys
+
+import re
+import string
+import os
+import glob
+import copy
+import collections
+import c_gen.of_g_legacy as of_g
+import c_gen.type_maps as type_maps
+import c_gen.loxi_utils_legacy as loxi_utils
+import loxi_globals
+import c_gen.identifiers as identifiers
+import pyparsing
+import loxi_front_end.parser as parser
+import c_gen.translation as translation
+import loxi_front_end.frontend as frontend
+from loxi_ir import *
+from generic_utils import *
+
+root_dir = os.path.dirname(os.path.realpath(__file__))
+
+versions = {}
+# TODO:  Put these in a class so they get documented
+
+## Dict indexed by version giving all info related to version
+#
+# This is local; after processing, the information is stored in
+# of_g variables.
+
+def add_class(wire_version, cls, members):
+    """
+    Process a class for the given version and update the unified
+    list of classes as needed.
+
+    @param wire_version The wire version for this class defn
+    @param cls The name of the class being added
+    @param members The list of members with offsets calculated
+    """
+    memid = 0
+
+    sig = loxi_utils.class_signature(members)
+    if cls in of_g.unified:
+        uc = of_g.unified[cls]
+        if wire_version in uc:
+            debug("Error adding %s to unified. Wire ver %d exists" %
+                  (cls, wire_version))
+            sys.exit(1)
+        uc[wire_version] = {}
+        # Check for a matching signature
+        for wver in uc:
+            if type(wver) != type(0): continue
+            if wver == wire_version: continue
+            if not "use_version" in uc[wver]:
+                if sig == loxi_utils.class_signature(uc[wver]["members"]):
+                    log("Matched %s, ver %d to ver %d" %
+                          (cls, wire_version, wver))
+                    # have a match with existing version
+                    uc[wire_version]["use_version"] = wver
+                    # What else to do?
+                    return
+    else:  # Haven't seen this entry before
+        log("Adding %s to unified list, ver %d" % (cls, wire_version))
+        of_g.unified[cls] = dict(union={})
+        uc = of_g.unified[cls]
+
+    # At this point, need to add members for this version
+    uc[wire_version] = dict(members = members)
+
+    # Per member processing:
+    #  Add to union list (I'm sure there's a better way)
+    #  Check if it's a list
+    union = uc["union"]
+    if not cls in of_g.ordered_members:
+        of_g.ordered_members[cls] = []
+    for member in members:
+        m_name = member["name"]
+        m_type = member["m_type"]
+        if m_name.find("pad") == 0:
+            continue
+        if m_name in union:
+            if not m_type == union[m_name]["m_type"]:
+                debug("ERROR:   CLASS: %s. VERSION %d. MEMBER: %s. TYPE: %s" %
+                      (cls, wire_version, m_name, m_type))
+                debug("    Type conflict adding member to unified set.")
+                debug("    Current union[%s]:" % m_name)
+                debug(union[m_name])
+                sys.exit(1)
+        else:
+            union[m_name] = dict(m_type=m_type, memid=memid)
+            memid += 1
+        if not m_name in of_g.ordered_members[cls]:
+            of_g.ordered_members[cls].append(m_name)
+
+def update_offset(cls, wire_version, name, offset, m_type):
+    """
+    Update (and return) the offset based on type.
+    @param cls The parent class
+    @param wire_version The wire version being processed
+    @param name The name of the data member
+    @param offset The current offset
+    @param m_type The type declaration being processed
+    @returns A pair (next_offset, len_update)  next_offset is the new offset
+    of the next object or -1 if this is a var-length object.  len_update
+    is the increment that should be added to the length.  Note that (for
+    of_match_v3) it is variable length, but it adds 8 bytes to the fixed
+    length of the object
+    If offset is already -1, do not update
+    Otherwise map to base type and count and update (if possible)
+    """
+    if offset < 0:    # Don't update offset once set to -1
+        return offset, 0
+
+    count, base_type = loxi_utils.type_dec_to_count_base(m_type)
+
+    len_update = 0
+    if base_type in of_g.of_mixed_types:
+        base_type = of_g.of_mixed_types[base_type][wire_version]
+
+    base_class = base_type[:-2]
+    if (base_class, wire_version) in of_g.is_fixed_length:
+        bytes = of_g.base_length[(base_class, wire_version)]
+    else:
+        if base_type == "of_match_v3_t":
+            # This is a special case: it has non-zero min length
+            # but is variable length
+            bytes = -1
+            len_update = 8
+        elif base_type in of_g.of_base_types:
+            bytes = of_g.of_base_types[base_type]["bytes"]
+        else:
+            print "UNKNOWN TYPE for %s %s: %s" % (cls, name, base_type)
+            log("UNKNOWN TYPE for %s %s: %s" % (cls, name, base_type))
+            bytes = -1
+
+    # If the base type has a known fixed size, the member contributes count * bytes
+    if bytes > 0:
+        len_update = count * bytes
+
+    if bytes == -1:
+        return -1, len_update
+
+    return offset + (count * bytes), len_update
+
+def calculate_offsets_and_lengths(ordered_classes, classes, wire_version):
+    """
+    Generate the offsets for fixed offset class members
+    Also calculate the class_sizes when possible.
+
+    @param classes The classes to process
+    @param wire_version The wire version for this set of classes
+
+    Updates global variables
+    """
+
+    lists = set()
+
+    # Generate offsets
+    for cls in ordered_classes:
+        fixed_offset = 0 # The last "good" offset seen
+        offset = 0
+        last_offset = 0
+        last_name = "-"
+        for member in classes[cls]:
+            m_type = member["m_type"]
+            name = member["name"]
+            if last_offset == -1:
+                if name == "pad":
+                    log("Skipping pad for special offset for %s" % cls)
+                else:
+                    log("SPECIAL OFS: Member %s (prev %s), class %s ver %d" %
+                          (name, last_name, cls, wire_version))
+                    if (((cls, name) in of_g.special_offsets) and
+                        (of_g.special_offsets[(cls, name)] != last_name)):
+                        debug("ERROR: special offset prev name changed")
+                        debug("  cls %s. name %s. version %d. was %s. now %s" %
+                              cls, name, wire_version,
+                              of_g.special_offsets[(cls, name)], last_name)
+                        sys.exit(1)
+                    of_g.special_offsets[(cls, name)] = last_name
+
+            member["offset"] = offset
+            if m_type.find("list(") == 0:
+                (list_name, base_type) = loxi_utils.list_name_extract(m_type)
+                lists.add(list_name)
+                member["m_type"] = list_name + "_t"
+                offset = -1
+            elif m_type.find("struct") == 0:
+                debug("ERROR found struct: %s.%s " % (cls, name))
+                sys.exit(1)
+            elif m_type == "octets":
+                log("offset gen skipping octets: %s.%s " % (cls, name))
+                offset = -1
+            else:
+                offset, len_update = update_offset(cls, wire_version, name,
+                                                  offset, m_type)
+                if offset != -1:
+                    fixed_offset = offset
+                else:
+                    fixed_offset += len_update
+                    log("offset is -1 for %s.%s version %d " %
+                        (cls, name, wire_version))
+            last_offset = offset
+            last_name = name
+        of_g.base_length[(cls, wire_version)] = fixed_offset
+        if (offset != -1):
+            of_g.is_fixed_length.add((cls, wire_version))
+    for list_type in lists:
+        classes[list_type] = []
+        of_g.ordered_classes[wire_version].append(list_type)
+        of_g.base_length[(list_type, wire_version)] = 0
+
+def order_and_assign_object_ids():
+    """
+    Order all classes and assign object ids to all classes.
+
+    This is done to promote a reasonable order of the objects, putting
+    messages first followed by non-messages.  No assumptions should be
+    made about the order, nor about contiguous numbering.  However, the
+    numbers should all be reasonably small allowing arrays indexed by
+    these enum values to be defined.
+    """
+
+    # Generate separate message and non-message ordered lists
+    for cls in of_g.unified:
+        if loxi_utils.class_is_message(cls):
+            of_g.ordered_messages.append(cls)
+        elif loxi_utils.class_is_list(cls):
+            of_g.ordered_list_objects.append(cls)
+        else:
+            of_g.ordered_non_messages.append(cls)
+
+    of_g.ordered_messages.sort()
+    of_g.ordered_pseudo_objects.sort()
+    of_g.ordered_non_messages.sort()
+    of_g.ordered_list_objects.sort()
+    of_g.standard_class_order.extend(of_g.ordered_messages)
+    of_g.standard_class_order.extend(of_g.ordered_non_messages)
+    of_g.standard_class_order.extend(of_g.ordered_list_objects)
+
+    # This includes pseudo classes for which most code is not generated
+    of_g.all_class_order.extend(of_g.ordered_messages)
+    of_g.all_class_order.extend(of_g.ordered_non_messages)
+    of_g.all_class_order.extend(of_g.ordered_list_objects)
+    of_g.all_class_order.extend(of_g.ordered_pseudo_objects)
+
+    # Assign object IDs
+    for cls in of_g.ordered_messages:
+        of_g.unified[cls]["object_id"] = of_g.object_id
+        of_g.object_id += 1
+    for cls in of_g.ordered_non_messages:
+        of_g.unified[cls]["object_id"] = of_g.object_id
+        of_g.object_id += 1
+    for cls in of_g.ordered_list_objects:
+        of_g.unified[cls]["object_id"] = of_g.object_id
+        of_g.object_id += 1
+    for cls in of_g.ordered_pseudo_objects:
+        of_g.unified[cls] = {}
+        of_g.unified[cls]["object_id"] = of_g.object_id
+        of_g.object_id += 1
+
+
+def initialize_versions():
+    """
+    Create an empty datastructure for each target version.
+    """
+
+    for version in loxi_globals.OFVersions.target_versions:
+        wire_version = version.wire_version
+        version_name = of_g.of_version_wire2name[wire_version]
+        of_g.wire_ver_map[wire_version] = version_name
+        versions[version_name] = dict(
+            version_name = version_name,
+            wire_version = wire_version,
+            classes = {})
+        of_g.ordered_classes[wire_version] = []
+
+    of_g.target_version_list = [ v.wire_version for v in loxi_globals.OFVersions.target_versions ]
+
+def build_ordered_classes():
+    """
+    Read in from files given on command line and update global state
+
+    @fixme Should select versions to support from command line
+    """
+
+    for version, protocol in loxi_globals.ir.items():
+        wire_version = version.wire_version
+        # Populate global state
+        version_name = of_g.of_version_wire2name[wire_version]
+
+        for ofclass in protocol.classes:
+            of_g.ordered_classes[wire_version].append(ofclass.name)
+            legacy_members = []
+            pad_count = 0
+            for m in ofclass.members:
+                if type(m) == OFPadMember:
+                    m_name = 'pad%d' % pad_count
+                    if m_name == 'pad0': m_name = 'pad'
+                    legacy_members.append(dict(m_type='uint8_t[%d]' % m.length,
+                                               name=m_name))
+                    pad_count += 1
+                else:
+                    # HACK the C backend does not yet support of_oxm_t
+                    if m.oftype == 'of_oxm_t':
+                        m_type = 'of_octets_t'
+                    else:
+                        enum = find(lambda e: e.name == m.oftype, protocol.enums)
+                        if enum and "wire_type" in enum.params:
+                            m_type = enum.params["wire_type"]
+                        else:
+                            m_type = m.oftype
+                    legacy_members.append(dict(m_type=m_type, name=m.name))
+            versions[version_name]['classes'][ofclass.name] = legacy_members
+
+        for enum in protocol.enums:
+            for entry in enum.entries:
+                identifiers.add_identifier(
+                    translation.loxi_name(entry.name),
+                    entry.name, enum.name, entry.value, wire_version,
+                    of_g.identifiers, of_g.identifiers_by_group)
+
+def populate_type_maps():
+    """
+    Use the type members in the IR to fill out the legacy type_maps.
+    """
+
+    def split_inherited_cls(cls):
+        if cls == 'of_meter_band_stats': # HACK not a subtype of of_meter_band
+            return None, None
+        for parent in sorted(type_maps.inheritance_data.keys(), reverse=True):
+            if cls.startswith(parent):
+                return (parent, cls[len(parent)+1:])
+        return None, None
+
+    def find_experimenter(parent, cls):
+        for experimenter in sorted(of_g.experimenter_name_to_id.keys(), reverse=True):
+            prefix = parent + '_' + experimenter
+            if cls.startswith(prefix) and cls != prefix:
+                return experimenter
+        return None
+
+    def find_type_value(ofclass, m_name):
+        for m in ofclass.members:
+            if isinstance(m, OFTypeMember) and m.name == m_name:
+                return m.value
+        raise KeyError("ver=%d, cls=%s, m_name=%s" % (wire_version, cls, m_name))
+
+    # Most inheritance classes: actions, instructions, etc
+    for version, protocol in loxi_globals.ir.items():
+        wire_version = version.wire_version
+        for ofclass in protocol.classes:
+            cls = ofclass.name
+            parent, subcls = split_inherited_cls(cls)
+            if not (parent and subcls):
+                continue
+            if parent == 'of_oxm':
+                type_len = find_type_value(ofclass, 'type_len')
+                oxm_class = (type_len >> 16) & 0xffff
+                if oxm_class != 0x8000:
+                    # Do not include experimenter OXMs in the main table
+                    val = type_maps.invalid_type
+                else:
+                    val = (type_len >> 8) & 0xff
+            else:
+                val = find_type_value(ofclass, 'type')
+            type_maps.inheritance_data[parent][wire_version][subcls] = val
+
+            # Extensions (only actions for now)
+            experimenter = find_experimenter(parent, cls)
+            if parent == 'of_action' and experimenter:
+                val = find_type_value(ofclass, 'subtype')
+                type_maps.extension_action_subtype[wire_version][experimenter][cls] = val
+                if wire_version >= of_g.VERSION_1_3:
+                    cls2 = parent + "_id" + cls[len(parent):]
+                    type_maps.extension_action_id_subtype[wire_version][experimenter][cls2] = val
+            elif parent == 'of_instruction' and experimenter:
+                val = find_type_value(ofclass, 'subtype')
+                type_maps.extension_instruction_subtype[wire_version][experimenter][cls] = val
+
+    # Messages
+    for version, protocol in loxi_globals.ir.items():
+        wire_version = version.wire_version
+        for ofclass in protocol.classes:
+            cls = ofclass.name
+            # HACK (though this is what loxi_utils.class_is_message() does)
+            if not [x for x in ofclass.members if isinstance(x, OFDataMember) and x.name == 'xid']:
+                continue
+            if type_maps.class_is_virtual(cls):
+                continue
+            subcls = cls[3:]
+            val = find_type_value(ofclass, 'type')
+            if not val in type_maps.message_types[wire_version].values():
+                type_maps.message_types[wire_version][subcls] = val
+
+            # Extensions
+            experimenter = find_experimenter('of', cls)
+            if experimenter and ofclass.is_subclassof("of_experimenter"):
+                val = find_type_value(ofclass, 'subtype')
+                type_maps.extension_message_subtype[wire_version][experimenter][cls] = val
+
+    type_maps.generate_maps()
+
+def analyze_input():
+    """
+    Add information computed from the input, including offsets and
+    lengths of struct members and the set of list and action_id types.
+    """
+
+    # Generate header classes for inheritance parents
+    for wire_version, ordered_classes in of_g.ordered_classes.items():
+        classes = versions[of_g.of_version_wire2name[wire_version]]['classes']
+        for cls in ordered_classes:
+            if cls in type_maps.inheritance_map:
+                new_cls = cls + '_header'
+                of_g.ordered_classes[wire_version].append(new_cls)
+                classes[new_cls] = classes[cls]
+
+    for wire_version in of_g.wire_ver_map.keys():
+        version_name = of_g.of_version_wire2name[wire_version]
+        calculate_offsets_and_lengths(
+            of_g.ordered_classes[wire_version],
+            versions[version_name]['classes'],
+            wire_version)
+
+def unify_input():
+    """
+    Create Unified View of Objects
+    """
+
+    global versions
+
+    # Add classes to unified in wire-format order so that it is easier
+    # to generate things later
+    keys = versions.keys()
+    keys.sort(reverse=True)
+    for version in keys:
+        wire_version = versions[version]["wire_version"]
+        classes = versions[version]["classes"]
+        for cls in of_g.ordered_classes[wire_version]:
+            add_class(wire_version, cls, classes[cls])
+
+
+def log_all_class_info():
+    """
+    Log the results of processing the input
+
+    Debug function
+    """
+
+    for cls in of_g.unified:
+        for v in of_g.unified[cls]:
+            if type(v) == type(0):
+                log("cls: %s. ver: %d. base len %d. %s" %
+                    (str(cls), v, of_g.base_length[(cls, v)],
+                     loxi_utils.class_is_var_len(cls,v) and "not fixed"
+                     or "fixed"))
+                if "use_version" in of_g.unified[cls][v]:
+                    log("cls %s: v %d mapped to %d" % (str(cls), v,
+                           of_g.unified[cls][v]["use_version"]))
+                if "members" in of_g.unified[cls][v]:
+                    for member in of_g.unified[cls][v]["members"]:
+                        log("   %-20s: type %-20s. offset %3d" %
+                            (member["name"], member["m_type"],
+                             member["offset"]))
+
+def generate_all_files():
+    """
+    Create the files for the language target
+    """
+    for (name, fn) in lang_module.targets.items():
+        path = of_g.options.install_dir + '/' + name
+        os.system("mkdir -p %s" % os.path.dirname(path))
+        with open(path, "w") as outfile:
+            fn(outfile, os.path.basename(name))
+        print("Wrote contents for " + name)
+
+if __name__ == '__main__':
+    of_g.loxigen_log_file = open("loxigen.log", "w")
+    of_g.loxigen_dbg_file = sys.stdout
+
+    of_g.process_commandline()
+    # @fixme Use command line params to select log
+
+    if not config_sanity_check():
+        debug("Config sanity check failed\n")
+        sys.exit(1)
+
+    # Import the language file
+    lang_file = "lang_%s" % of_g.options.lang
+    lang_module = __import__(lang_file)
+
+    # If list files, just list auto-gen files to stdout and exit
+    if of_g.options.list_files:
+        for name in lang_module.targets:
+            print of_g.options.install_dir + '/' + name
+        sys.exit(0)
+
+    log("\nGenerating files for target language %s\n" % of_g.options.lang)
+
+    initialize_versions()
+    read_input()
+    populate_type_maps()
+    analyze_input()
+    unify_input()
+    order_and_assign_object_ids()
+    log_all_class_info()
+    generate_all_files()
diff --git a/c_gen/c_code_gen.py b/c_gen/c_code_gen.py
index 09c4f2f..78a809e 100644
--- a/c_gen/c_code_gen.py
+++ b/c_gen/c_code_gen.py
@@ -31,18 +31,17 @@
 """
 
 import sys
-import of_g
+import c_gen.of_g_legacy as of_g
 import c_match
 from generic_utils import *
-import c_gen.c_type_maps as c_type_maps
-import loxi_front_end.type_maps as type_maps
-import loxi_front_end.flags as flags
-import loxi_utils.loxi_utils as loxi_utils
-import loxi_front_end.identifiers as identifiers
+from c_gen import flags, type_maps, c_type_maps
+import c_gen.loxi_utils_legacy as loxi_utils
+import loxi_globals
+
+import c_gen.identifiers as identifiers
 
 # 'property' is for queues. Could be trouble
 
-
 ################################################################
 #
 # Misc helper functions
@@ -64,18 +63,6 @@
     """
     return loxi_utils.enum_name(cls)
 
-def member_returns_val(cls, m_name):
-    """
-    Should get accessor return a value rather than void
-    @param cls The class name
-    @param m_name The member name
-    @return True if of_g config and the specific member allow a 
-    return value.  Otherwise False
-    """
-    m_type = of_g.unified[cls]["union"][m_name]["m_type"]
-    return (config_check("get_returns") =="value" and 
-            m_type in of_g.of_scalar_types)
-
 # TODO serialize match outside accessor?
 def accessor_return_type(a_type, m_type):
     if loxi_utils.accessor_returns_error(a_type, m_type):
@@ -139,7 +126,7 @@
 /**
  * For each identifier from an OpenFlow header file, a Loxi version
  * of the identifier is generated.  For example, ofp_port_flood becomes
- * OF_PORT_DEST_FLOOD.  Loxi provides the following macros related to 
+ * OF_PORT_DEST_FLOOD.  Loxi provides the following macros related to
  * OpenFlow identifiers (using OF_IDENT_ as an example below):
  *     OF_IDENT_BY_VERSION(version) Get the value for the specific version
  *     OF_IDENT_SUPPORTED(version) Boolean: Is OF_IDENT defined for version
@@ -248,7 +235,7 @@
         idents.sort()
         out.write("""
 /****************************************************************
- * Identifiers from %s 
+ * Identifiers from %s
  *****************************************************************/
 """ % group)
         for ident in idents:
@@ -284,8 +271,8 @@
                                                   of_g.target_version_list,
                                                   ident):
                 out.write("""\
-#define %(ident)s (%(value)s)
-#define %(ident)s_BY_VERSION(version) (%(value)s)
+#define %(ident)s (%(value)#x)
+#define %(ident)s_BY_VERSION(version) (%(value)#x)
 """ % dict(ident=ident,value=info["common_value"]))
             else: # Values differ between versions
                 # Generate version check and value by version
@@ -296,7 +283,7 @@
                         value = info["values_by_version"][version]
                     else:
                         value = identifiers.UNDEFINED_IDENT_VALUE
-                    val_list.append("%s" % value)
+                    val_list.append("%#x" % value)
                 out.write("""\
 #define %(ident)s_BY_VERSION(version)     \\
     OF_VALUE_BY_VERSION(version, %(val_str)s)
@@ -426,32 +413,49 @@
                                     of_match_t *match);
 extern int of_wire_buffer_of_match_set(of_object_t *obj, int offset,
                                     of_match_t *match, int cur_len);
-extern void of_extension_object_id_set(of_object_t *obj, of_object_id_t id);
 """)
 
     # gen_base_types(out)
 
-    gen_struct_typedefs(out)
-    gen_acc_pointer_typedefs(out)
-    gen_new_function_declarations(out)
-    if config_check("gen_unified_fns"):
-        gen_accessor_declarations(out)
-
-    gen_common_struct_definitions(out)
     gen_flow_add_setup_function_declarations(out)
-    if config_check("gen_fn_ptrs"): # Otherwise, all classes are from generic cls
-        gen_struct_definitions(out)
-    gen_generic_union(out)
-    gen_generics(out)
-    gen_top_static_functions(out)
     out.write("""
 /****************************************************************
  *
  * Declarations of maps between on-the-wire type values and LOCI identifiers
  *
  ****************************************************************/
+
+/**
+ * Generic experimenter type value.  Applies to all except
+ * top level message: Action, instruction, error, stats, queue_props, oxm
+ */
+#define OF_EXPERIMENTER_TYPE 0xffff
+
+int of_experimenter_stats_request_to_object_id(uint32_t experimenter, uint32_t subtype, int ver);
+int of_experimenter_stats_reply_to_object_id(uint32_t experimenter, uint32_t subtype, int ver);
+
+of_object_id_t of_action_to_object_id(int action, of_version_t version);
+of_object_id_t of_action_id_to_object_id(int action_id, of_version_t version);
+of_object_id_t of_instruction_to_object_id(int instruction, of_version_t version);
+of_object_id_t of_queue_prop_to_object_id(int queue_prop, of_version_t version);
+of_object_id_t of_table_feature_prop_to_object_id(int table_feature_prop, of_version_t version);
+of_object_id_t of_meter_band_to_object_id(int meter_band, of_version_t version);
+of_object_id_t of_hello_elem_to_object_id(int hello_elem, of_version_t version);
+of_object_id_t of_stats_reply_to_object_id(int stats_reply, of_version_t version);
+of_object_id_t of_stats_request_to_object_id(int stats_request, of_version_t version);
+of_object_id_t of_error_msg_to_object_id(uint16_t error_msg, of_version_t version);
+of_object_id_t of_flow_mod_to_object_id(int flow_mod, of_version_t version);
+of_object_id_t of_group_mod_to_object_id(int group_mod, of_version_t version);
+of_object_id_t of_oxm_to_object_id(uint32_t type_len, of_version_t version);
+of_object_id_t of_message_experimenter_to_object_id(of_message_t msg, of_version_t version);
+of_object_id_t of_message_to_object_id(of_message_t msg, int length);
+of_object_id_t of_bsn_tlv_to_object_id(int tlv_type, of_version_t version);
+
+int of_object_wire_init(of_object_t *obj, of_object_id_t base_object_id, int max_len);
+
+extern const int *const of_object_fixed_len[OF_VERSION_ARRAY_MAX];
+extern const int *const of_object_extra_len[OF_VERSION_ARRAY_MAX];
 """)
-    c_type_maps.gen_type_maps_header(out)
     c_type_maps.gen_type_data_header(out)
     c_match.gen_declarations(out)
     # @fixme Move debug stuff to own fn
@@ -486,309 +490,6 @@
     c_match.gen_serialize(out)
     c_match.gen_deserialize(out)
 
-def gen_len_offset_macros(out):
-    """
-    Special case length and offset calculations put directly into
-    loci.c as they are private.
-    """
-
-    out.write("""
-/****************************************************************
- * Special case macros for calculating variable lengths and offsets
- ****************************************************************/
-
-/**
- * Get a u16 directly from an offset in an object's wire buffer
- * @param obj An of_object_t object
- * @param offset Base offset of the uint16 relative to the object
- *
- */
-
-static inline int
-of_object_u16_get(of_object_t *obj, int offset) {
-    uint16_t val16;
-
-    of_wire_buffer_u16_get(obj->wire_object.wbuf,
-        obj->wire_object.obj_offset + offset, &val16);
-
-    return (int)val16;
-}
-
-/**
- * Set a u16 directly at an offset in an object's wire buffer
- * @param obj An of_object_t object
- * @param offset Base offset of the uint16 relative to the object
- * @param val The value to store
- *
- */
-
-static inline void
-of_object_u16_set(of_object_t *obj, int offset, int value) {
-    uint16_t val16;
-
-    val16 = (uint16_t)value;
-    of_wire_buffer_u16_set(obj->wire_object.wbuf,
-        obj->wire_object.obj_offset + offset, val16);
-}
-
-/**
- * Get length of an object with a TLV header with uint16_t
- * @param obj An object with a match member
- * @param offset The wire offset of the start of the object
- *
- * The length field follows the type field.
- */
-
-#define _TLV16_LEN(obj, offset) \\
-    (of_object_u16_get((of_object_t *)(obj), (offset) + 2))
-
-/**
- * Get length of an object that is the "rest" of the object
- * @param obj An object with a match member
- * @param offset The wire offset of the start of the object
- *
- */
-
-#define _END_LEN(obj, offset) ((obj)->length - (offset))
-
-/**
- * Get length of the action list object in a packet_out object
- * @param obj An object of type of_packet_out
- *
- * The length field is just before the end of the fixed length
- * part of the object in all versions.
- */
-
-#define _PACKET_OUT_ACTION_LEN(obj) \\
-    (of_object_u16_get((of_object_t *)(obj), \\
-     of_object_fixed_len[(obj)->version][OF_PACKET_OUT] - 2))
-
-/**
- * Set length of the action list object in a packet_out object
- * @param obj An object of type of_packet_out
- *
- * The length field is just before the end of the fixed length
- * part of the object in all versions.
- */
-
-#define _PACKET_OUT_ACTION_LEN_SET(obj, len) \\
-    (of_object_u16_set((of_object_t *)(obj), \\
-     of_object_fixed_len[(obj)->version][OF_PACKET_OUT] - 2, len))
-
-/*
- * Match structs in 1.2 come at the end of the fixed length part
- * of structures.  They add 8 bytes to the minimal length of the
- * message, but are also variable length.  This means that the 
- * type/length offsets are 8 bytes back from the end of the fixed 
- * length part of the object.  The right way to handle this is to 
- * expose the offset of the match member more explicitly.  For now, 
- * we make the calculation as described here.
- */
-
-/* 1.2 min length of match is 8 bytes */
-#define _MATCH_MIN_LENGTH_V3 8
-
-/**
- * The offset of a 1.2 match object relative to fixed length of obj
- */
-#define _MATCH_OFFSET_V3(fixed_obj_len) \\
-    ((fixed_obj_len) - _MATCH_MIN_LENGTH_V3)
-
-/**
- * The "extra" length beyond the minimal 8 bytes of a match struct
- * in an object
- */
-#define _MATCH_EXTRA_LENGTH_V3(obj, fixed_obj_len) \\
-    (OF_MATCH_BYTES(_TLV16_LEN(obj, _MATCH_OFFSET_V3(fixed_obj_len))) - \\
-     _MATCH_MIN_LENGTH_V3)
-
-/**
- * The offset of an object following a match object for 1.2
- */
-#define _OFFSET_FOLLOWING_MATCH_V3(obj, fixed_obj_len) \\
-    ((fixed_obj_len) + _MATCH_EXTRA_LENGTH_V3(obj, fixed_obj_len))
-
-/**
- * Get length of a match object from its wire representation
- * @param obj An object with a match member
- * @param match_offset The wire offset of the match object.
- *
- * See above; for 1.2, 
- * The match length is raw bytes but the actual space it takes
- * up is padded for alignment to 64-bits
- */
-#define _WIRE_MATCH_LEN(obj, match_offset) \\
-    (((obj)->version == OF_VERSION_1_0) ? %(match1)d : \\
-     (((obj)->version == OF_VERSION_1_1) ? %(match2)d : \\
-      _TLV16_LEN(obj, match_offset)))
-
-#define _WIRE_LEN_MIN 4
-
-/*
- * Wrapper function for match len.  There are cases where the wire buffer
- * has not been set with the proper minimum length.  In this case, the
- * wire match len is interpretted as its minimum length, 4 bytes.
- */
-
-static inline int
-wire_match_len(of_object_t *obj, int match_offset) {
-    int len;
-
-    len = _WIRE_MATCH_LEN(obj, match_offset);
-
-    return (len == 0) ? _WIRE_LEN_MIN : len;
-}
-
-#define _WIRE_MATCH_PADDED_LEN(obj, match_offset) \\
-    OF_MATCH_BYTES(wire_match_len((of_object_t *)(obj), (match_offset)))
-
-/**
- * Macro to calculate variable offset of instructions member in flow mod
- * @param obj An object of some type of flow modify/add/delete
- *
- * Get length of preceding match object and add to fixed length
- * Applies only to version 1.2
- */
-
-#define _FLOW_MOD_INSTRUCTIONS_OFFSET(obj) \\
-    _OFFSET_FOLLOWING_MATCH_V3(obj, %(flow_mod)d)
-
-/* The different flavors of flow mod all use the above */
-#define _FLOW_ADD_INSTRUCTIONS_OFFSET(obj) \\
-    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
-#define _FLOW_MODIFY_INSTRUCTIONS_OFFSET(obj) \\
-    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
-#define _FLOW_MODIFY_STRICT_INSTRUCTIONS_OFFSET(obj) \\
-    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
-#define _FLOW_DELETE_INSTRUCTIONS_OFFSET(obj) \\
-    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
-#define _FLOW_DELETE_STRICT_INSTRUCTIONS_OFFSET(obj) \\
-    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
-
-/**
- * Macro to calculate variable offset of instructions member in flow stats
- * @param obj An object of type of_flow_mod_t
- *
- * Get length of preceding match object and add to fixed length
- * Applies only to version 1.2 and 1.3
- */
-
-#define _FLOW_STATS_ENTRY_INSTRUCTIONS_OFFSET(obj) \\
-    _OFFSET_FOLLOWING_MATCH_V3(obj, %(flow_stats)d)
-
-/**
- * Macro to calculate variable offset of data (packet) member in packet_in
- * @param obj An object of type of_packet_in_t
- *
- * Get length of preceding match object and add to fixed length
- * Applies only to version 1.2 and 1.3
- */
-
-#define _PACKET_IN_DATA_OFFSET(obj) \\
-    _OFFSET_FOLLOWING_MATCH_V3((obj), (obj)->version == OF_VERSION_1_2 ? \
-%(packet_in)d : %(packet_in_1_3)d)
-
-/**
- * Macro to calculate variable offset of data (packet) member in packet_out
- * @param obj An object of type of_packet_out_t
- *
- * Find the length in the actions_len variable and add to the fixed len
- * Applies only to version 1.2 and 1.3
- */
-
-#define _PACKET_OUT_DATA_OFFSET(obj) (_PACKET_OUT_ACTION_LEN(obj) + \\
-     of_object_fixed_len[(obj)->version][OF_PACKET_OUT])
-
-/**
- * Macro to map port numbers that changed across versions
- * @param port The port_no_t variable holding the value
- * @param ver The OpenFlow version from which the value was extracted
- */
-#define OF_PORT_NO_VALUE_CHECK(port, ver) \\
-    if (((ver) == OF_VERSION_1_0) && ((port) > 0xff00)) (port) += 0xffff0000
-
-""" % dict(flow_mod=of_g.base_length[("of_flow_modify",of_g.VERSION_1_2)],
-           packet_in=of_g.base_length[("of_packet_in",of_g.VERSION_1_2)],
-           packet_in_1_3=of_g.base_length[("of_packet_in",of_g.VERSION_1_3)],
-           flow_stats=of_g.base_length[("of_flow_stats_entry",
-                                        of_g.VERSION_1_2)],
-           match1=of_g.base_length[("of_match_v1",of_g.VERSION_1_0)],
-           match2=of_g.base_length[("of_match_v2",of_g.VERSION_1_1)]))
-
-def gen_obj_id_macros(out):
-    """
-    Flow modify (add, delete) messages (and maybe others) use ID checks allowing
-    inheritance to use common accessor functions.
-    """
-    out.write("""
-/**
- * Macro to detect if an object ID falls in the "flow mod" family of objects
- * This includes add, modify, modify_strict, delete and delete_strict
- */
-#define IS_FLOW_MOD_SUBTYPE(object_id)                 \\
-    (((object_id) == OF_FLOW_MODIFY) ||                \\
-     ((object_id) == OF_FLOW_MODIFY_STRICT) ||         \\
-     ((object_id) == OF_FLOW_DELETE) ||                \\
-     ((object_id) == OF_FLOW_DELETE_STRICT) ||         \\
-     ((object_id) == OF_FLOW_ADD))
-""")
-
-
-def top_c_gen(out, name):
-    """
-    Generate code for
-    @param out The file handle to write to
-    @param name The name of the file
-    """
-    common_top_matter(out, name)
-    # Generic C code that needs to go into loci.c can go here.
-    out.write("""
-/****************************************************************
- *
- * This file is divided into the following sections.
- *
- * Instantiate strings such as object names
- * Special case macros for low level object access
- * Per-class, per-member accessor definitions
- * Per-class new/init function definitions
- * Per-class new/init pointer instantiations
- * Instantiate "set map" for pointer set fns
- *
- ****************************************************************/
-
-#ifdef __GNUC__
-#include <features.h>
-
-#if __GNUC_PREREQ(4,4)
-#pragma GCC optimize ("s")
-#endif
-
-#if __GNUC_PREREQ(4,6)
-#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
-#endif
-
-#endif
-
-#include <loci/loci.h>
-#include <loci/of_object.h>
-#include "loci_log.h"
-
-""")
-    gen_object_enum_str(out)
-    gen_len_offset_macros(out)
-    gen_obj_id_macros(out)
-    if config_check("gen_unified_fns"):
-        gen_accessor_definitions(out)
-    gen_new_function_definitions(out)
-    gen_init_map(out)
-    out.write("\n/* This code should be broken out to a different file */\n")
-    gen_setup_from_add_fns(out)
-
-def type_data_c_gen(out, name):
-    common_top_matter(out, name)
-    c_type_maps.gen_type_maps(out)
-    c_type_maps.gen_length_array(out)
-
 ################################################################
 # Top Matter
 ################################################################
@@ -796,6 +497,7 @@
 def common_top_matter(out, name):
     loxi_utils.gen_c_copy_license(out)
     out.write("""\
+
 /****************************************************************
  * File: %s
  *
@@ -991,6 +693,16 @@
 typedef char of_desc_str_t[OF_DESC_STR_LEN];
 typedef char of_serial_num_t[OF_SERIAL_NUM_LEN];
 
+typedef struct of_bitmap_128_s {
+    uint64_t hi;
+    uint64_t lo;
+} of_bitmap_128_t;
+
+typedef struct of_checksum_128_s {
+    uint64_t hi;
+    uint64_t lo;
+} of_checksum_128_t;
+
 /* These are types which change across versions.  */
 typedef uint32_t of_port_no_t;
 typedef uint16_t of_fm_cmd_t;
@@ -1081,7 +793,7 @@
 #include <loci/of_message.h>
 #include <loci/of_match.h>
 #include <loci/of_object.h>
-#include <loci/of_wire_buf.h>
+#include <loci/loci_classes.h>
 
 /****************************************************************
  *
@@ -1101,51 +813,6 @@
  ****************************************************************/
 """)
 
-def gen_top_static_functions(out):
-    out.write("""
-
-#define _MAX_PARENT_ITERATIONS 4
-/**
- * Iteratively update parent lengths thru hierarchy
- * @param obj The object whose length is being updated
- * @param delta The difference between the current and new lengths
- *
- * Note that this includes updating the object itself.  It will
- * iterate thru parents.
- *
- * Assumes delta > 0.
- */
-static inline void
-of_object_parent_length_update(of_object_t *obj, int delta)
-{
-#ifndef NDEBUG
-    int count = 0;
-    of_wire_buffer_t *wbuf;  /* For debug asserts only */
-#endif
-
-    while (obj != NULL) {
-        ASSERT(count++ < _MAX_PARENT_ITERATIONS);
-        obj->length += delta;
-        if (obj->wire_length_set != NULL) {
-            obj->wire_length_set(obj, obj->length);
-        }
-#ifndef NDEBUG
-        wbuf = obj->wire_object.wbuf;
-#endif
-
-        /* Asserts for wire length checking */
-        ASSERT(obj->length + obj->wire_object.obj_offset <=
-               WBUF_CURRENT_BYTES(wbuf));
-        if (obj->parent == NULL) {
-            ASSERT(obj->length + obj->wire_object.obj_offset ==
-                   WBUF_CURRENT_BYTES(wbuf));
-        }
-
-        obj = obj->parent;
-    }
-}
-""")
-
 ################################################################
 #
 ################################################################
@@ -1187,7 +854,7 @@
  */
 #define OF_VERSION_ARRAY_MAX %d
 """ % (max + 1))
-    
+
 def gen_object_enum(out):
     """
     Generate the enumerated type for object identification in LoxiGen
@@ -1302,63 +969,6 @@
 }
 """)
 
-def gen_object_enum_str(out):
-    out.write("\nconst char *const of_object_id_str[] = {\n")
-    out.write("    \"of_object\",\n")
-    for cls in of_g.ordered_messages:
-        out.write("    \"%s\",\n" % cls)
-    out.write("\n    /* Non-message objects */\n")
-    for cls in of_g.ordered_non_messages:
-        out.write("    \"%s\",\n" % cls)
-    out.write("\n    /* List objects */\n")
-    for cls in of_g.ordered_list_objects:
-        out.write("    \"%s\",\n" % cls)
-    out.write("\n    /* Generic stats request/reply types; pseudo objects */\n")
-    for cls in of_g.ordered_pseudo_objects:
-        out.write("    \"%s\",\n" % cls)
-    out.write("\n    \"of_unknown_object\"\n};\n")
-
-    # We'll do version strings while we're at it
-    out.write("""
- const char *const of_version_str[] = {
-    "Unknown OpenFlow Version",
-    "OpenFlow-1.0",
-    "OpenFlow-1.1",
-    "OpenFlow-1.2"
-};
-
-const of_mac_addr_t of_mac_addr_all_ones = {
-    {
-        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-    }
-};
-/* Just to be explicit; static duration vars are init'd to 0 */
-const of_mac_addr_t of_mac_addr_all_zeros = {
-    {
-        0, 0, 0, 0, 0, 0
-    }
-};
-
-const of_ipv6_t of_ipv6_all_ones = {
-    {
-        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-    }
-};
-/* Just to be explicit; static duration vars are init'd to 0 */
-const of_ipv6_t of_ipv6_all_zeros = {
-    {
-        0, 0, 0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0, 0, 0
-    }
-};
-
-/** @var of_error_strings
- * The error string map; use abs value to index
- */
-const char *const of_error_strings[] = { OF_ERROR_STRINGS };
-""")
-
 ################################################################
 #
 # Internal Utility Functions
@@ -1390,8 +1000,6 @@
     member = of_g.unified[cls]["union"][m_name]
     m_type = member["m_type"]
     rv = "int"
-    if member_returns_val(cls, m_name):
-        rv = m_type
     if m_type[-2:] == "_t":
         m_type = m_type[:-2]
 
@@ -1474,7 +1082,7 @@
 
 def v3_match_offset_get(cls):
     """
-    Return the offset of an OF 1.2 match in an object if it has such; 
+    Return the offset of an OF 1.2 match in an object if it has such;
     otherwise return -1
     """
     result = field_ver_get(cls, "match")
@@ -1527,11 +1135,8 @@
     for cls in of_g.standard_class_order:
         if cls in type_maps.inheritance_map:
             continue
-        if config_check("gen_fn_ptrs"):
-            out.write("typedef struct %(cls)s_s %(cls)s_t;\n" % dict(cls=cls))
-        else:
-            template = "typedef of_object_t %(cls)s_t;\n"
-            out.write(template % dict(cls=cls))
+        template = "typedef of_object_t %(cls)s_t;\n"
+        out.write(template % dict(cls=cls))
 
     out.write("""
 /****************************************************************
@@ -1546,47 +1151,6 @@
 
 """)
 
-def gen_generic_union(out):
-    """
-    Generate the generic union object composing all LOCI objects
-
-    @param out The file to which to write the decs
-    """
-    out.write("""
-/**
- * The common LOCI object is a union of all possible objects.
- */
-union of_generic_u {
-    of_object_t object;  /* Common base class with fundamental accessors */
-
-    /* Message objects */
-""")
-    for cls in of_g.ordered_messages:
-        out.write("    %s_t %s;\n" % (cls, cls))
-    out.write("\n    /* Non-message composite objects */\n")
-    for cls in of_g.ordered_non_messages:
-        if cls in type_maps.inheritance_map:
-            continue
-        out.write("    %s_t %s;\n" % (cls, cls))
-    out.write("\n    /* List objects */\n")
-    for cls in of_g.ordered_list_objects:
-        out.write("    %s_t %s;\n" % (cls, cls))
-    out.write("};\n")
-
-def gen_common_struct_definitions(out):
-    out.write("""
-/****************************************************************
- *
- * Unified structure definitions
- *
- ****************************************************************/
-
-struct of_object_s {
-    /* Common members */
-%(common)s
-};
-""" % dict(common=of_g.base_object_members))
-
 def gen_flow_add_setup_function_declarations(out):
     """
     Add the declarations for functions that can be initialized
@@ -1653,7 +1217,7 @@
  *     idle_timeout
  *     hard_timeout
  *
- * Note that the actions/instructions of a flow may be modified by a 
+ * Note that the actions/instructions of a flow may be modified by a
  * subsequent flow modify message.  To facilitate implementations,
  * the "effects" parameter is provided.  If effects is NULL, the
  * actions/instructions are taken from the flow_add message.
@@ -1667,51 +1231,6 @@
                                         of_object_t *effects);
 """)
 
-def gen_struct_definitions(out):
-    """
-    Generate the declaration of all of_ C structures
-
-    @param out The file to which to write the decs
-    """
-
-    # This should only get called if gen_fn_ptr is true in code_gen_config
-    if not config_check("gen_fn_ptrs"):
-        debug("Error: gen_struct_defs called, but no fn ptrs set")
-        return
-
-    for cls in of_g.standard_class_order:
-        if cls in type_maps.inheritance_map:
-            continue # These are generated elsewhere
-        note = ""
-        if loxi_utils.class_is_message(cls):
-            note = " /* Class is message */"
-        out.write("struct %s_s {%s\n" % (cls, note))
-        out.write("""    /* Common members */
-%s
-    /* Class specific members */
-""" % of_g.base_object_members)
-        if loxi_utils.class_is_list(cls):
-            out.write("""
-    %(cls)s_first_f first;
-    %(cls)s_next_f next;
-    %(cls)s_append_bind_f append_bind;
-    %(cls)s_append_f append;
-};
-
-""" % {"cls": cls})
-            continue   # All done with list object
-
-        # Else, not a list instance; add accessors for all data members
-        for m_name in of_g.ordered_members[cls]:
-            if m_name in of_g.skip_members:
-                # These members (length, etc) are handled internally
-                continue
-            f_name = acc_name(cls, m_name)
-            out.write("    %s_get_f %s;\n" % (f_name, m_name + "_get"))
-            out.write("    %s_set_f %s;\n" % (f_name, m_name + "_set"))
-        out.write("};\n\n")
-
-
 ################################################################
 #
 # List accessor code generation
@@ -1817,7 +1336,7 @@
     %(i_call)s;
 
     /* Derive offset and length of child in parent */
-    OF_TRY(of_object_child_attach(parent, child, 
+    OF_TRY(of_object_child_attach(parent, child,
     if ((rv = of_list_first((of_object_t *)list, (of_object_t *)obj)) < 0) {
         return rv;
     }
@@ -1849,7 +1368,7 @@
         len_str = "obj->header.length"
     else:
         len_str = "obj->length"
-        
+
     out.write("""
 /**
  * Advance an iterator to the next element in a list
@@ -1936,7 +1455,7 @@
 #
 ################################################################
 
-    
+
 def gen_accessor_declarations(out):
     """
     Generate the declaration of each version independent accessor
@@ -1987,7 +1506,7 @@
     %(gparams)s);
 """ % dict(base_name=base_name, gparams=gparams, sparams=sparams,
            get_ret_type=get_ret_type, set_ret_type=set_ret_type))
-            
+
         if loxi_utils.class_is_list(cls):
             e_type = loxi_utils.list_to_entry_type(cls)
             out.write("""
@@ -2031,7 +1550,7 @@
         m_type = "octets_data"
     return "of_wire_buffer_%s_%s" % (m_type, a_type)
 
-def get_len_macro(cls, m_type, version):
+def get_len_macro(cls, m_name, m_type, version):
     """
     Get the length macro for m_type in cls
     """
@@ -2043,6 +1562,12 @@
         return "_TLV16_LEN(obj, offset)"
     if cls == "of_packet_out" and m_type == "of_list_action_t":
         return "_PACKET_OUT_ACTION_LEN(obj)"
+    if cls == "of_bsn_gentable_entry_add" and m_name == "key":
+        return "of_object_u16_get(obj, 18)"
+    if cls == "of_bsn_gentable_entry_desc_stats_entry" and m_name == "key":
+        return "of_object_u16_get(obj, 2)"
+    if cls == "of_bsn_gentable_entry_stats_entry" and m_name == "key":
+        return "of_object_u16_get(obj, 2)"
     # Default is everything to the end of the object
     return "_END_LEN(obj, offset)"
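The three key-length special cases above share one pattern: the variable-length key member is preceded by an explicit 16-bit length field at a fixed wire offset, so the generated length macro reads that field with of_object_u16_get() instead of running to the end of the object (the matching setter special cases appear in a later hunk). A stand-alone sketch of the same dispatch, with the offsets taken from the hunk above and everything else assumed for illustration:

# Sketch only -- mirrors the special cases in get_len_macro; offsets are
# the wire positions of each class's 16-bit key-length field.
KEY_LEN_OFFSETS = {
    "of_bsn_gentable_entry_add": 18,
    "of_bsn_gentable_entry_desc_stats_entry": 2,
    "of_bsn_gentable_entry_stats_entry": 2,
}

def key_len_macro(cls):
    """Return the C length expression used for a class's 'key' member."""
    offset = KEY_LEN_OFFSETS.get(cls)
    if offset is not None:
        # Read the stored length directly from the wire buffer.
        return "of_object_u16_get(obj, %d)" % offset
    # Default: everything to the end of the object.
    return "_END_LEN(obj, offset)"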
 
@@ -2069,6 +1594,12 @@
             pass
         elif (cls == "of_packet_out" and m_name == "data"):
             pass
+        elif (cls == "of_bsn_gentable_entry_add" and m_name == "value"):
+            pass
+        elif (cls == "of_bsn_gentable_entry_desc_stats_entry" and m_name == "value"):
+            pass
+        elif (cls == "of_bsn_gentable_entry_stats_entry" and m_name == "stats"):
+            pass
         else:
             debug("Error: Unknown member with offset == -1")
             debug("  cls %s, m_name %s, version %d" % (cls, m_name, version))
@@ -2083,7 +1614,7 @@
     if not loxi_utils.type_is_scalar(m_type):
         if loxi_utils.class_is_var_len(m_type[:-2], version) or \
                 m_type == "of_match_t":
-            len_macro = get_len_macro(cls, m_type, version)
+            len_macro = get_len_macro(cls, m_name, m_type, version)
         else:
             len_macro = "%d" % of_g.base_length[(m_type[:-2], version)]
         out.write("        cur_len = %s;\n" % len_macro)
@@ -2101,7 +1632,7 @@
         return of_g.base_length[(m_type[:-2], version)]
     print "Unknown length request", m_type, version
     sys.exit(1)
-        
+
 
 def gen_get_accessor_body(out, cls, m_type, m_name):
     """
@@ -2199,6 +1730,16 @@
     /* Special case for setting action lengths */
     _PACKET_OUT_ACTION_LEN_SET(obj, %(m_name)s->length);
 """ % dict(m_name=m_name))
+        elif cls == "of_bsn_gentable_entry_add" and m_name == "key":
+            out.write("""
+    /* Special case for setting key length */
+    of_object_u16_set(obj, 18, %(m_name)s->length);
+""" % dict(m_name=m_name))
+        elif cls in ["of_bsn_gentable_entry_desc_stats_entry", "of_bsn_gentable_entry_stats_entry"] and m_name == "key":
+            out.write("""
+    /* Special case for setting key length */
+    of_object_u16_set(obj, 2, %(m_name)s->length);
+""" % dict(m_name=m_name))
         elif m_type not in ["of_match_t", "of_octets_t"]:
             out.write("""
     /* @fixme Shouldn't this precede copying value's data to buffer? */
@@ -2231,7 +1772,7 @@
     sub_cls = m_type[:-2]
     out.write("""
 /**
- * Create a copy of %(m_name)s into a new variable of type %(m_type)s from 
+ * Create a copy of %(m_name)s into a new variable of type %(m_type)s from
  * a %(cls)s instance.
  *
  * @param obj Pointer to the source of type %(cls)s_t
@@ -2388,28 +1929,7 @@
     out.write("%s\n%s_%s_get(\n    %s)\n" % (ret_type, cls, m_name, params))
     gen_unified_acc_body(out, cls, m_name, ver_type_map, "get", m_type)
 
-def gen_accessor_definitions(out):
-    """
-    Generate the body of each version independent accessor
-
-    @param out The file to which to write the decs
-    """
-
-    out.write("""
-/****************************************************************
- *
- * Unified accessor function definitions
- *
- ****************************************************************/
-""")
-    for cls in of_g.standard_class_order:
-        if cls in type_maps.inheritance_map:
-            continue
-        out.write("\n/* Unified accessor functions for %s */\n" % cls)
-        if loxi_utils.class_is_list(cls):
-            gen_list_accessors(out, cls)
-            continue
-        out.write("/** \\ingroup %s \n * @{ */\n" % cls)
+def gen_accessor_definitions(out, cls):
         for m_name in of_g.ordered_members[cls]:
             if m_name in of_g.skip_members:
                 continue
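gen_accessor_definitions now generates a single class per call instead of looping over of_g.standard_class_order itself; gen_new_function_definitions is reworked the same way further down. The driver that walks the class list is outside this hunk, so the loop below is only an assumed illustration of how such per-class generators can be composed:

# Hypothetical driver (illustration only; the real caller is not shown in
# this patch).  Each generator is assumed to have the new
# generator(out, cls) signature.
def generate_all(out, classes, inheritance_map, generators):
    for cls in classes:
        if cls in inheritance_map:
            continue  # virtual classes are generated elsewhere
        for gen in generators:
            gen(out, cls)

# e.g. generate_all(out, of_g.standard_class_order,
#                   type_maps.inheritance_map,
#                   [gen_accessor_definitions, gen_new_function_definitions])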
@@ -2450,96 +1970,6 @@
             out.write("%s\n%s_%s_set(\n    %s)\n" % (ret_type, cls, m_name, params))
             gen_unified_acc_body(out, cls, m_name, ver_type_map, "set", m_type)
 
-        out.write("\n/** @} */\n")
-
-def gen_acc_pointer_typedefs(out):
-    """
-    Generate the function pointer typedefs for in-struct accessors
-    @param out The file to which to write the typedefs
-    """
-
-    out.write("""
-/****************************************************************
- *
- * Accessor function pointer typedefs
- *
- ****************************************************************/
-
-/*
- * Generic accessors:
- *
- * Many objects have a length represented in the wire buffer
- * wire_length_get and wire_length_set access these values directly on the
- * wire.
- *
- * Many objects have a length represented in the wire buffer
- * wire_length_get and wire_length_set access these values directly on the
- * wire.
- *
- * FIXME: TBD if wire_length_set and wire_type_set are required.
- */
-typedef void (*of_wire_length_get_f)(of_object_t *obj, int *bytes);
-typedef void (*of_wire_length_set_f)(of_object_t *obj, int bytes);
-typedef void (*of_wire_type_get_f)(of_object_t *obj, of_object_id_t *id);
-typedef void (*of_wire_type_set_f)(of_object_t *obj, of_object_id_t id);
-""")
-    # If not using function pointers in classes, don't gen typedefs below
-    if not config_check("gen_fn_ptrs"):
-        return
-
-    # For each class, for each type it uses, generate a typedef
-    for cls in of_g.standard_class_order:
-        if cls in type_maps.inheritance_map:
-            continue
-        out.write("\n/* Accessor function pointer typedefs for %s */\n" % cls)
-        types_done = list()
-        for m_name in of_g.ordered_members[cls]:
-            (m_type, get_rv) = get_acc_rv(cls, m_name)
-            if (m_type, get_rv) in types_done:
-                continue
-            types_done.append((m_type, get_rv))
-            fn = "%s_%s" % (cls, m_type)
-            params = ", ".join(param_list(cls, m_name, "get"))
-            out.write("typedef int (*%s_get_f)(\n    %s);\n" %
-                      (fn, params))
-
-            params = ", ".join(param_list(cls, m_name, "set"))
-            out.write("typedef int (*%s_set_f)(\n    %s);\n" %
-                      (fn, params))
-        if loxi_utils.class_is_list(cls):
-            obj_type = loxi_utils.list_to_entry_type(cls)
-            out.write("""typedef int (*%(cls)s_first_f)(
-    %(cls)s_t *list,
-    %(obj_type)s_t *obj);
-typedef int (*%(cls)s_next_f)(
-    %(cls)s_t *list,
-    %(obj_type)s_t *obj);
-typedef int (*%(cls)s_append_bind_f)(
-    %(cls)s_t *list,
-    %(obj_type)s_t *obj);
-typedef int (*%(cls)s_append_f)(
-    %(cls)s_t *list,
-    %(obj_type)s_t *obj);
-""" % {"cls":cls, "obj_type":obj_type})
-
-#             out.write("""
-# typedef int (*%(cls)s_get_f)(
-#     %(cls)s_t *list,
-#     %(obj_type)s_t *obj, int index);
-# typedef int (*%(cls)s_set_f)(
-#     %(cls)s_t *list,
-#     %(obj_type)s_t *obj, int index);
-# typedef int (*%(cls)s_append_f)(
-#     %(cls)s_t *list,
-#     %(obj_type)s_t *obj, int index);
-# typedef int (*%(cls)s_insert_f)(
-#     %(cls)s_t *list,
-#     %(obj_type)s_t *obj, int index);
-# typedef int (*%(cls)s_remove_f)(
-#     %(cls)s_t *list,
-#     int index);
-# """ % {"cls":cls, "obj_type":obj_type})
-
 ################################################################
 #
 # New/Delete Function Definitions
@@ -2560,21 +1990,6 @@
     return fn
 
 
-def instantiate_fn_ptrs(cls, ilvl, out):
-    """
-    Generate the C code to instantiate function pointers for a class
-    @param cls The class name
-    @param ilvl The base indentation level
-    @param out The file to which to write the functions
-    """
-    for m_name in of_g.ordered_members[cls]:
-        if m_name in of_g.skip_members:
-            continue
-        out.write(" " * ilvl + "obj->%s_get = %s_%s_get;\n" %
-                  (m_name, cls, m_name))
-        out.write(" " * ilvl + "obj->%s_set = %s_%s_set;\n" %
-                  (m_name, cls, m_name))
-
 ################################################################
 # Routines to generate the body of new/delete functions
 ################################################################
@@ -2602,7 +2017,7 @@
  * If bytes < 0, then the default fixed length is used for the object
  *
  * This is a "coerce" function that sets up the pointers for the
- * accessors properly.  
+ * accessors properly.
  *
  * If anything other than 0 is passed in for the buffer size, the underlying
  * wire buffer will have 'grow' called.
@@ -2628,7 +2043,7 @@
         MEMSET(obj, 0, sizeof(*obj));
     }
     if (bytes < 0) {
-        bytes = of_object_fixed_len[version][%(enum)s];
+        bytes = of_object_fixed_len[version][%(enum)s] + of_object_extra_len[version][%(enum)s];
     }
     obj->version = version;
     obj->length = bytes;
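The default size used when a caller passes bytes < 0 is now the object's fixed length plus a per-version "extra" length; the *_new constructors below and the test generator's of_g.extra_length lookup later in this patch compute the same sum. A toy model of that computation, where the dictionaries stand in for the generated of_object_fixed_len / of_object_extra_len tables and the values are made up for the example:

# Toy stand-ins for the generated length tables (values are hypothetical).
FIXED_LEN = {("of_example_msg", 4): 16}
EXTRA_LEN = {("of_example_msg", 4): 4}

def default_bytes(cls, version):
    """Default allocation size when the caller passes bytes < 0."""
    return FIXED_LEN[(cls, version)] + EXTRA_LEN.get((cls, version), 0)

# default_bytes("of_example_msg", 4) -> 20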
@@ -2667,61 +2082,30 @@
 {
 """ % dict(cls=cls))
 
+    import loxi_globals
+    uclass = loxi_globals.unified.class_by_name(cls)
+    if uclass and not uclass.virtual and uclass.has_type_members:
+        out.write("""
+    %(cls)s_push_wire_types(obj);
+""" % dict(cls=cls))
+
     if loxi_utils.class_is_message(cls):
         out.write("""
-    /* Message obj; push version, length and type to wire */
+    /* Message obj; set length */
     of_message_t msg;
 
     if ((msg = OF_OBJECT_TO_MESSAGE(obj)) != NULL) {
-        of_message_version_set(msg, obj->version);
         of_message_length_set(msg, obj->length);
-        OF_TRY(of_wire_message_object_id_set(OF_OBJECT_TO_WBUF(obj),
-                 %(name)s));
     }
 """ % dict(name = enum_name(cls)))
- 
-        for version in of_g.of_version_range:
-            if type_maps.class_is_extension(cls, version):
-                exp_name = type_maps.extension_to_experimenter_macro_name(cls)
-                subtype = type_maps.extension_message_to_subtype(cls, version)
-                if subtype is None or exp_name is None:
-                    print "Error in mapping extension message"
-                    print cls, version
-                    sys.exit(1)
-                out.write("""
-    if (obj->version == %(version)s) {
-        of_message_experimenter_id_set(OF_OBJECT_TO_MESSAGE(obj),
-                                       %(exp_name)s);
-        of_message_experimenter_subtype_set(OF_OBJECT_TO_MESSAGE(obj),
-                                            %(subtype)s);
-    }
-""" % dict(exp_name=exp_name, version=of_g.wire_ver_map[version],
-           subtype=str(subtype)))
-           
+
     else: # Not a message
         if loxi_utils.class_is_tlv16(cls):
             out.write("""
-    /* TLV obj; set length and type */
+    /* TLV obj; set length */
     of_tlv16_wire_length_set((of_object_t *)obj, obj->length);
-    of_tlv16_wire_object_id_set((of_object_t *)obj,
-           %(enum)s);
 """ % dict(enum=enum_name(cls)))
-            # Some tlv16 types may be extensions requiring more work
-            if cls in ["of_action_bsn_mirror", "of_action_id_bsn_mirror",
-                       "of_action_bsn_set_tunnel_dst", "of_action_id_bsn_set_tunnel_dst",
-                       "of_action_nicira_dec_ttl", "of_action_id_nicira_dec_ttl"]:
-                out.write("""
-    /* Extended TLV obj; Call specific accessor */
-    of_extension_object_id_set(obj, %(enum)s);
-""" % dict(cls=cls, enum=enum_name(cls)))
-                
 
-        if loxi_utils.class_is_oxm(cls):
-            out.write("""\
-    /* OXM obj; set length and type */
-    of_oxm_wire_length_set((of_object_t *)obj, obj->length);
-    of_oxm_wire_object_id_set((of_object_t *)obj, %(enum)s);
-""" % dict(enum=enum_name(cls)))
         if loxi_utils.class_is_u16_len(cls) or cls == "of_packet_queue":
             out.write("""
     obj->wire_length_set((of_object_t *)obj, obj->length);
@@ -2770,12 +2154,12 @@
  */
 
 %(cls)s_t *
-%(cls)s_new_(of_version_t version)
+%(cls)s_new(of_version_t version)
 {
     %(cls)s_t *obj;
     int bytes;
 
-    bytes = of_object_fixed_len[version][%(enum)s];
+    bytes = of_object_fixed_len[version][%(enum)s] + of_object_extra_len[version][%(enum)s];
 
     /* Allocate a maximum-length wire buffer assuming we'll be appending to it. */
     if ((obj = (%(cls)s_t *)of_object_new(OF_WIRE_BUFFER_MAX_LENGTH)) == NULL) {
@@ -2805,25 +2189,6 @@
     out.write("""
     return obj;
 }
-
-#if defined(OF_OBJECT_TRACKING)
-
-/*
- * Tracking objects.  Call the new function and then record location
- */
-
-%(cls)s_t *
-%(cls)s_new_tracking(of_version_t version,
-     const char *file, int line)
-{
-    %(cls)s_t *obj;
-
-    obj = %(cls)s_new_(version);
-    of_object_track((of_object_t *)obj, file, line);
-
-    return obj;
-}
-#endif
 """ % dict(cls=cls))
 
 
@@ -2844,7 +2209,7 @@
  */
 
 %(cls)s_t *
-%(cls)s_new_from_message_(of_message_t msg)
+%(cls)s_new_from_message(of_message_t msg)
 {
     %(cls)s_t *obj = NULL;
     of_version_t version;
@@ -2873,25 +2238,6 @@
 
     return obj;
 }
-
-#if defined(OF_OBJECT_TRACKING)
-
-/*
- * Tracking objects.  Call the new function and then record location
- */
-
-%(cls)s_t *
-%(cls)s_new_from_message_tracking(of_message_t msg,
-    const char *file, int line)
-{
-    %(cls)s_t *obj;
-
-    obj = %(cls)s_new_from_message_(msg);
-    of_object_track((of_object_t *)obj, file, line);
-
-    return obj;
-}
-#endif
 """ % dict(cls=cls))
 
 
@@ -2921,58 +2267,15 @@
  *
  ****************************************************************/
 """)
-    out.write("""
-/*
- * If object tracking is enabled, map "new" and "new from msg"
- * calls to tracking versions; otherwise, directly to internal
- * versions of fns which have the same name but end in _.
- */
-
-#if defined(OF_OBJECT_TRACKING)
-""")
-    for cls in of_g.standard_class_order:
-        out.write("""
-extern %(cls)s_t *
-    %(cls)s_new_tracking(of_version_t version,
-        const char *file, int line);
-#define %(cls)s_new(version) \\
-    %(cls)s_new_tracking(version, \\
-        __FILE__, __LINE__)
-""" % dict(cls=cls))
-        if loxi_utils.class_is_message(cls):
-            out.write("""extern %(cls)s_t *
-    %(cls)s_new_from_message_tracking(of_message_t msg,
-        const char *file, int line);
-#define %(cls)s_new_from_message(msg) \\
-    %(cls)s_new_from_message_tracking(msg, \\
-        __FILE__, __LINE__)
-""" % dict(cls=cls))
-
-    out.write("""
-#else /* No object tracking */
-""")
-    for cls in of_g.standard_class_order:
-        out.write("""
-#define %(cls)s_new(version) \\
-    %(cls)s_new_(version)
-""" % dict(cls=cls))
-        if loxi_utils.class_is_message(cls):
-            out.write("""#define %(cls)s_new_from_message(msg) \\
-    %(cls)s_new_from_message_(msg)
-""" % dict(cls=cls))
-
-    out.write("""
-#endif /* Object tracking */
-""")
 
     for cls in of_g.standard_class_order:
         out.write("""
 extern %(cls)s_t *
-    %(cls)s_new_(of_version_t version);
+    %(cls)s_new(of_version_t version);
 """ % dict(cls=cls))
         if loxi_utils.class_is_message(cls):
             out.write("""extern %(cls)s_t *
-    %(cls)s_new_from_message_(of_message_t msg);
+    %(cls)s_new_from_message(of_message_t msg);
 """ % dict(cls=cls))
         out.write("""extern void %(cls)s_init(
     %(cls)s_t *obj, of_version_t version, int bytes, int clean_wire);
@@ -3029,6 +2332,12 @@
     /* Set up the object's function pointers */
 """)
 
+    uclass = loxi_globals.unified.class_by_name(cls)
+    if uclass and not uclass.virtual and uclass.has_type_members:
+        out.write("""
+    obj->wire_type_set = %(cls)s_push_wire_types;
+""" % dict(cls=cls))
+
     if loxi_utils.class_is_message(cls):
         out.write("""
     obj->wire_length_get = of_object_message_wire_length_get;
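Both this hunk and the earlier one in the push path gate the new per-class helper on the same condition: the class must be concrete (not virtual) and must actually have type material to push. A small sketch of that shared check, with the uclass attributes taken from the hunks and the emitted line mirroring the write above:

# Sketch of the shared gating logic (illustration only).
def emit_wire_type_hookup(out, cls, uclass):
    """Install the generated push_wire_types helper for concrete classes."""
    if uclass is not None and not uclass.virtual and uclass.has_type_members:
        out.write("\n    obj->wire_type_set = %s_push_wire_types;\n" % cls)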
@@ -3039,7 +2348,6 @@
             if not (cls in type_maps.inheritance_map): # Don't set for super
                 out.write("""
     obj->wire_length_set = of_tlv16_wire_length_set;
-    obj->wire_type_set = of_tlv16_wire_object_id_set;\
 """)
             out.write("""
     obj->wire_length_get = of_tlv16_wire_length_get;
@@ -3072,12 +2380,14 @@
                     out.write("""
     obj->wire_type_get = of_hello_elem_wire_object_id_get;
 """)
+            if loxi_utils.class_is_bsn_tlv(cls):
+                    out.write("""
+    obj->wire_type_get = of_bsn_tlv_wire_object_id_get;
+""")
         if loxi_utils.class_is_oxm(cls):
             out.write("""
     obj->wire_length_get = of_oxm_wire_length_get;
-    obj->wire_length_set = of_oxm_wire_length_set;
     obj->wire_type_get = of_oxm_wire_object_id_get;
-    obj->wire_type_set = of_oxm_wire_object_id_set;
 """)
         if loxi_utils.class_is_u16_len(cls):
             out.write("""
@@ -3099,57 +2409,17 @@
     obj->wire_length_set = of_meter_stats_wire_length_set;
 """)
 
-    if config_check("gen_fn_ptrs"):
-        if loxi_utils.class_is_list(cls):
-            out.write("""
-    obj->first = %(cls)s_first;
-    obj->next = %(cls)s_next;
-    obj->append = %(cls)s_append;
-    obj->append_bind = %(cls)s_append_bind;
-""" % dict(cls=cls))
-        else:
-            instantiate_fn_ptrs(cls, 4, out)
-
-def gen_new_function_definitions(out):
+def gen_new_function_definitions(out, cls):
     """
     Generate the new operator for all classes
 
     @param out The file to which to write the functions
     """
 
-    out.write("\n/* New operators for each message class */\n")
-    for cls in of_g.standard_class_order:
-        out.write("\n/* New operators for %s */\n" % cls)
-        gen_new_fn_body(cls, out)
-        gen_init_fn_body(cls, out)
-        if loxi_utils.class_is_message(cls):
-            gen_from_message_fn_body(cls, out)
-
-def gen_init_map(out):
-    """
-    Generate map from object ID to type coerce function
-    """
-    out.write("""
-/**
- * Map from object ID to type coerce function
- */
-const of_object_init_f of_object_init_map[] = {
-    (of_object_init_f)NULL,
-""")
-    count = 1
-    for i, cls in enumerate(of_g.standard_class_order):
-        if count != of_g.unified[cls]["object_id"]:
-            print "Error in class mapping: object IDs not sequential"
-            print cls, count, of_g.unified[cls]["object_id"]
-            sys.exit(1)
-        s = "(of_object_init_f)%s_init" % cls
-        if cls in type_maps.inheritance_map:
-            s = "(of_object_init_f)%s_header_init" % cls
-        if i < len(of_g.standard_class_order) - 1:
-            s += ","
-        out.write("    %-65s /* %d */\n" % (s, count))
-        count += 1
-    out.write("};\n")
+    gen_new_fn_body(cls, out)
+    gen_init_fn_body(cls, out)
+    if loxi_utils.class_is_message(cls):
+        gen_from_message_fn_body(cls, out)
 
 """
 Document generation functions
@@ -3175,7 +2445,7 @@
 
         out.write("""
 /**
- * Structure for %(cls)s object.  Get/set 
+ * Structure for %(cls)s object.  Get/set
  * accessors available in all versions unless noted otherwise
  *
 """ % dict(cls=cls))
@@ -3310,7 +2580,7 @@
 """)
 
 def gen_jump_table_template(out=sys.stdout, all_unhandled=True,
-                            cxn_type="ls_cxn_handle_t", 
+                            cxn_type="ls_cxn_handle_t",
                             unhandled="unhandled_message"):
     """
     Generate a template for a jump table.
@@ -3340,11 +2610,11 @@
         if not all_unhandled:
             fn_name = "%s_handler" % cls[3:]
         out.write("    %s%s /* %s */\n" % (fn_name, comma, enum_name(cls)))
-            
+
     out.write("};\n")
 
 def gen_message_switch_stmt_tmeplate(out=sys.stdout, all_unhandled=True,
-                                     cxn_type="ls_cxn_handle_t", 
+                                     cxn_type="ls_cxn_handle_t",
                                      unhandled="unhandled_message"):
     out.write("""
 /*
@@ -3417,221 +2687,3 @@
 }
 """ % dict(s_cls=cls[3:], cls=cls, cxn_type=cxn_type))
     gen_message_switch_stmt_tmeplate(out, False, cxn_type)
-
-def gen_setup_from_add_fns(out):
-    """
-    Generate functions that setup up objects based on an add
-
-    Okay, this is getting out of hand.  We need to refactor the code
-    so that this can be done without so much pain.
-    """
-    out.write("""
-
-/* Flow stats entry setup for all versions */
-
-static int
-flow_stats_entry_setup_from_flow_add_common(of_flow_stats_entry_t *obj,
-                                            of_flow_add_t *flow_add,
-                                            of_object_t *effects,
-                                            int entry_match_offset,
-                                            int add_match_offset)
-{
-    of_list_action_t actions;
-    int entry_len, add_len;
-    of_wire_buffer_t *wbuf;
-    int abs_offset;
-    int delta;
-    uint16_t val16;
-    uint64_t cookie;
-    of_octets_t match_octets;
-
-    /* Effects may come from different places */
-    if (effects != NULL) {
-        OF_TRY(of_flow_stats_entry_actions_set(obj,
-               (of_list_action_t *)effects));
-    } else {
-        of_flow_add_actions_bind(flow_add, &actions);
-        OF_TRY(of_flow_stats_entry_actions_set(obj, &actions));
-    }
-
-    /* Transfer the match underlying object from add to stats entry */
-    wbuf = OF_OBJECT_TO_WBUF(obj);
-    entry_len = _WIRE_MATCH_PADDED_LEN(obj, entry_match_offset);
-    add_len = _WIRE_MATCH_PADDED_LEN(flow_add, add_match_offset);
-
-    match_octets.bytes = add_len;
-    match_octets.data = OF_OBJECT_BUFFER_INDEX(flow_add, add_match_offset);
-
-    /* Copy data into flow entry */
-    abs_offset = OF_OBJECT_ABSOLUTE_OFFSET(obj, entry_match_offset);
-    of_wire_buffer_replace_data(wbuf, abs_offset, entry_len,
-                                match_octets.data, add_len);
-
-    /* Not scalar, update lengths if needed */
-    delta = add_len - entry_len;
-    if (delta != 0) {
-        /* Update parent(s) */
-        of_object_parent_length_update((of_object_t *)obj, delta);
-    }
-
-    of_flow_add_cookie_get(flow_add, &cookie);
-    of_flow_stats_entry_cookie_set(obj, cookie);
-
-    of_flow_add_priority_get(flow_add, &val16);
-    of_flow_stats_entry_priority_set(obj, val16);
-
-    of_flow_add_idle_timeout_get(flow_add, &val16);
-    of_flow_stats_entry_idle_timeout_set(obj, val16);
-
-    of_flow_add_hard_timeout_get(flow_add, &val16);
-    of_flow_stats_entry_hard_timeout_set(obj, val16);
-
-    return OF_ERROR_NONE;
-}
-
-/* Flow removed setup for all versions */
-
-static int
-flow_removed_setup_from_flow_add_common(of_flow_removed_t *obj,
-                                        of_flow_add_t *flow_add,
-                                        int removed_match_offset,
-                                        int add_match_offset)
-{
-    int add_len, removed_len;
-    of_wire_buffer_t *wbuf;
-    int abs_offset;
-    int delta;
-    uint16_t val16;
-    uint64_t cookie;
-    of_octets_t match_octets;
-
-    /* Transfer the match underlying object from add to removed obj */
-    wbuf = OF_OBJECT_TO_WBUF(obj);
-    removed_len = _WIRE_MATCH_PADDED_LEN(obj, removed_match_offset);
-    add_len = _WIRE_MATCH_PADDED_LEN(flow_add, add_match_offset);
-
-    match_octets.bytes = add_len;
-    match_octets.data = OF_OBJECT_BUFFER_INDEX(flow_add, add_match_offset);
-
-    /* Copy data into flow removed */
-    abs_offset = OF_OBJECT_ABSOLUTE_OFFSET(obj, removed_match_offset);
-    of_wire_buffer_replace_data(wbuf, abs_offset, removed_len,
-                                match_octets.data, add_len);
-
-    /* Not scalar, update lengths if needed */
-    delta = add_len - removed_len;
-    if (delta != 0) {
-        /* Update parent(s) */
-        of_object_parent_length_update((of_object_t *)obj, delta);
-    }
-
-    of_flow_add_cookie_get(flow_add, &cookie);
-    of_flow_removed_cookie_set(obj, cookie);
-
-    of_flow_add_priority_get(flow_add, &val16);
-    of_flow_removed_priority_set(obj, val16);
-
-    of_flow_add_idle_timeout_get(flow_add, &val16);
-    of_flow_removed_idle_timeout_set(obj, val16);
- 
-    if (obj->version >= OF_VERSION_1_2) {
-        of_flow_add_hard_timeout_get(flow_add, &val16);
-        of_flow_removed_hard_timeout_set(obj, val16);
-    }
-
-    return OF_ERROR_NONE;
-}
-
-/* Set up a flow removed message from the original add */
-
-int
-of_flow_removed_setup_from_flow_add(of_flow_removed_t *obj,
-                                    of_flow_add_t *flow_add)
-{
-    switch (obj->version) {
-    case OF_VERSION_1_0:
-        return flow_removed_setup_from_flow_add_common(obj, flow_add, 
-                                                       8, 8);
-        break;
-    case OF_VERSION_1_1:
-    case OF_VERSION_1_2:
-    case OF_VERSION_1_3:
-        return flow_removed_setup_from_flow_add_common(obj, flow_add, 
-                                                       48, 48);
-        break;
-    default:
-        return OF_ERROR_VERSION;
-        break;
-    }
-
-    return OF_ERROR_NONE;
-}
-
-
-/* Set up a packet in message from the original add */
-
-int
-of_packet_in_setup_from_flow_add(of_packet_in_t *obj,
-                                 of_flow_add_t *flow_add)
-{
-    int add_len, pkt_in_len;
-    of_wire_buffer_t *wbuf;
-    int abs_offset;
-    int delta;
-    const int pkt_in_match_offset = 16;
-    const int add_match_offset = 48;
-    of_octets_t match_octets;
-
-    if (obj->version < OF_VERSION_1_2) {
-        /* Nothing to be done before OF 1.2 */
-        return OF_ERROR_NONE;
-    }
-
-    /* Transfer match struct from flow add to packet in object */
-    wbuf = OF_OBJECT_TO_WBUF(obj);
-    pkt_in_len = _WIRE_MATCH_PADDED_LEN(obj, pkt_in_match_offset);
-    add_len = _WIRE_MATCH_PADDED_LEN(flow_add, add_match_offset);
-
-    match_octets.bytes = add_len;
-    match_octets.data = OF_OBJECT_BUFFER_INDEX(flow_add, add_match_offset);
-
-    /* Copy data into pkt_in msg */
-    abs_offset = OF_OBJECT_ABSOLUTE_OFFSET(obj, pkt_in_match_offset);
-    of_wire_buffer_replace_data(wbuf, abs_offset, pkt_in_len,
-                                match_octets.data, add_len);
-
-    /* Not scalar, update lengths if needed */
-    delta = add_len - pkt_in_len;
-    if (delta != 0) {
-        /* Update parent(s) */
-        of_object_parent_length_update((of_object_t *)obj, delta);
-    }
-
-    return OF_ERROR_NONE;
-}
-
-/* Set up a stats entry from the original add */
-
-int
-of_flow_stats_entry_setup_from_flow_add(of_flow_stats_entry_t *obj,
-                                        of_flow_add_t *flow_add,
-                                        of_object_t *effects)
-{
-    switch (obj->version) {
-    case OF_VERSION_1_0:
-        return flow_stats_entry_setup_from_flow_add_common(obj, flow_add,
-                                                           effects, 4, 8);
-        break;
-    case OF_VERSION_1_1:
-    case OF_VERSION_1_2:
-    case OF_VERSION_1_3:
-        return flow_stats_entry_setup_from_flow_add_common(obj, flow_add, 
-                                                           effects, 48, 48);
-        break;
-    default:
-        return OF_ERROR_VERSION;
-    }
-
-    return OF_ERROR_NONE;
-}
-""")
diff --git a/c_gen/c_dump_gen.py b/c_gen/c_dump_gen.py
index 08abae0..a0af14e 100644
--- a/c_gen/c_dump_gen.py
+++ b/c_gen/c_dump_gen.py
@@ -33,13 +33,13 @@
 """
 
 import sys
-import of_g
-import loxi_front_end.match as match
-import loxi_front_end.flags as flags
+import c_gen.of_g_legacy as of_g
+import c_gen.match as match
+import c_gen.flags as flags
 from generic_utils import *
-import loxi_front_end.type_maps as type_maps
-import loxi_utils.loxi_utils as loxi_utils
-import loxi_front_end.identifiers as identifiers
+import c_gen.type_maps as type_maps
+import c_gen.loxi_utils_legacy as loxi_utils
+import c_gen.identifiers as identifiers
 from c_test_gen import var_name_map
 
 def gen_obj_dump_h(out, name):
@@ -49,7 +49,7 @@
  *
  * AUTOMATICALLY GENERATED FILE.  Edits will be lost on regen.
  *
- * Header file for object dumping. 
+ * Header file for object dumping.
  */
 
 /**
@@ -75,9 +75,9 @@
 
 
 /**
- * Dump any OF object. 
+ * Dump any OF object.
  */
-int of_object_dump(loci_writer_f writer, void* cookie, of_object_t* obj); 
+int of_object_dump(loci_writer_f writer, void* cookie, of_object_t* obj);
 
 
 
@@ -110,8 +110,8 @@
  *
  * AUTOMATICALLY GENERATED FILE.  Edits will be lost on regen.
  *
- * Source file for object dumping. 
- * 
+ * Source file for object dumping.
+ *
  */
 
 #define DISABLE_WARN_UNUSED_RESULT
@@ -122,9 +122,9 @@
 static int
 unknown_dump(loci_writer_f writer, void* cookie, of_object_t *obj)
 {
-    return writer(cookie, "Unable to print object of type %d, version %d\\n", 
+    return writer(cookie, "Unable to print object of type %d, version %d\\n",
                          obj->object_id, obj->version);
-}    
+}
 """)
 
     for version in of_g.of_version_range:
@@ -188,7 +188,7 @@
                     out.write("""
     %(cls)s_%(m_name)s_bind(obj, &%(v_name)s);
     out += %(sub_cls)s_%(ver_name)s_dump(writer, cookie, &%(v_name)s);
-""" % dict(cls=cls, sub_cls=sub_cls, m_name=m_name, 
+""" % dict(cls=cls, sub_cls=sub_cls, m_name=m_name,
            v_name=var_name_map(m_type), ver_name=ver_name))
 
             out.write("""
@@ -236,11 +236,11 @@
             if j < len(of_g.all_class_order) - 1: # Avoid ultimate comma
                 comma = ","
 
-            if (not loxi_utils.class_in_version(cls, version) or 
+            if (not loxi_utils.class_in_version(cls, version) or
                     cls in type_maps.inheritance_map):
                 out.write("    unknown_dump%s\n" % comma);
             else:
-                out.write("    %s_%s_dump%s\n" % 
+                out.write("    %s_%s_dump%s\n" %
                           (cls, loxi_utils.version_to_name(version), comma))
         out.write("};\n\n")
 
@@ -257,7 +257,7 @@
 of_object_dump(loci_writer_f writer, void* cookie, of_object_t *obj)
 {
     if ((obj->object_id > 0) && (obj->object_id < OF_OBJECT_COUNT)) {
-        if (((obj)->version > 0) && ((obj)->version <= OF_VERSION_1_2)) {
+        if (((obj)->version > 0) && ((obj)->version <= OF_VERSION_1_3)) {
             /* @fixme VERSION */
             return dump_funs[obj->version][obj->object_id](writer, cookie, (of_object_t *)obj);
         } else {
diff --git a/c_gen/c_match.py b/c_gen/c_match.py
index 8c27bb5..a45090a 100644
--- a/c_gen/c_match.py
+++ b/c_gen/c_match.py
@@ -40,9 +40,8 @@
 # takes mask
 
 import sys
-import of_g
-import loxi_front_end.oxm as oxm
-import loxi_front_end.match as match
+import c_gen.of_g_legacy as of_g
+import c_gen.match as match
 import c_code_gen
 
 def match_c_top_matter(out, name):
@@ -179,6 +178,18 @@
 } of_match_t;
 
 /**
+ * Mask the values in the match structure according to its masks
+ */
+static inline void of_match_values_mask(of_match_t *match)
+{
+    int idx;
+
+    for (idx = 0; idx < sizeof(of_match_fields_t); idx++) {
+        ((uint8_t *)&match->fields)[idx] &= ((uint8_t *)&match->masks)[idx];
+    }
+}
+
+/**
 * IP Mask map.  IP mask wildcards from OF 1.0 are interpreted as
  * indices into the map below.
  *
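of_match_values_mask byte-wise ANDs every field value with its mask, so a match never carries value bits outside its wildcard masks; the v1 and v3 conversion routines below call it just before returning. A pure-Python model of the same idea, using a dict per match with field names and values assumed only for illustration:

# Pure-Python model of of_match_values_mask (illustration only).
def match_values_mask(fields, masks):
    """Clear any value bits not covered by the corresponding mask."""
    return dict((name, value & masks.get(name, 0))
                for name, value in fields.items())

# Hypothetical example:
# match_values_mask({"ipv4_dst": 0x0a0a0a0a}, {"ipv4_dst": 0xffffff00})
#   -> {"ipv4_dst": 0x0a0a0a00}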
@@ -275,6 +286,14 @@
     OF_OXM_INDEX_IPV6_ND_TLL    = 33, /* Target link-layer for ND. */
     OF_OXM_INDEX_MPLS_LABEL     = 34, /* MPLS label. */
     OF_OXM_INDEX_MPLS_TC        = 35, /* MPLS TC. */
+
+    OF_OXM_INDEX_BSN_IN_PORTS_128 = 36,
+    OF_OXM_INDEX_BSN_LAG_ID = 37,
+    OF_OXM_INDEX_BSN_VRF = 38,
+    OF_OXM_INDEX_BSN_GLOBAL_VRF_ALLOWED = 39,
+    OF_OXM_INDEX_BSN_L3_INTERFACE_CLASS_ID = 40,
+    OF_OXM_INDEX_BSN_L3_SRC_CLASS_ID = 41,
+    OF_OXM_INDEX_BSN_L3_DST_CLASS_ID = 42,
 };
 
 #define OF_OXM_BIT(index) (((uint64_t) 1) << (index))
@@ -589,8 +608,8 @@
 
     /* For each active member, add an OXM entry to the list */
 """)
-    # @fixme Would like to generate the list in some reasonable order
-    for key, entry in match.of_match_members.items():
+    for key in match.match_keys_sorted:
+        entry = match.of_match_members[key]
         out.write("""\
     if (OF_MATCH_MASK_%(ku)s_ACTIVE_TEST(src)) {
         if (!OF_MATCH_MASK_%(ku)s_EXACT_TEST(src)) {
@@ -598,17 +617,17 @@
             elt = &oxm_entry.%(key)s_masked;
 
             of_oxm_%(key)s_masked_init(elt,
-                src->version, -1, 1);
+                oxm_list->version, -1, 1);
             of_list_oxm_append_bind(oxm_list, &oxm_entry);
-            of_oxm_%(key)s_masked_value_set(elt, 
+            of_oxm_%(key)s_masked_value_set(elt,
                    src->fields.%(key)s);
-            of_oxm_%(key)s_masked_value_mask_set(elt, 
+            of_oxm_%(key)s_masked_value_mask_set(elt,
                    src->masks.%(key)s);
         } else {  /* Active, but not masked */
             of_oxm_%(key)s_t *elt;
             elt = &oxm_entry.%(key)s;
             of_oxm_%(key)s_init(elt,
-                src->version, -1, 1);
+                oxm_list->version, -1, 1);
             of_list_oxm_append_bind(oxm_list, &oxm_entry);
             of_oxm_%(key)s_value_set(elt, src->fields.%(key)s);
         }
@@ -637,9 +656,9 @@
         return OF_ERROR_PARAM;
     }
     if (dst->object_id != OF_MATCH_V3) {
-        of_match_v3_init(dst, src->version, 0, 0);
+        of_match_v3_init(dst, OF_VERSION_1_2, 0, 0);
     }
-    if ((oxm_list = of_list_oxm_new(src->version)) == NULL) {
+    if ((oxm_list = of_list_oxm_new(dst->version)) == NULL) {
         return OF_ERROR_RESOURCE;
     }
 
@@ -683,26 +702,14 @@
 
     of_match_v1_wildcards_get(src, &wc);
 """)
-    # Deal with nw fields first
-    out.write("""
-    /* Handle L3 src and dst wildcarding first */
-    /* @fixme Check mask values are properly treated for ipv4 src/dst */
-    if ((count = OF_MATCH_V1_WC_IPV4_DST_GET(wc)) < 32) {
-        of_match_v1_ipv4_dst_get(src, &dst->fields.ipv4_dst);
-        if (count > 0) { /* Not exact match */
-            dst->masks.ipv4_dst = ~(((uint32_t)1 << count) - 1);
-        } else {
-            OF_MATCH_MASK_IPV4_DST_EXACT_SET(dst);
-        }
-    }
-""")
     for key in sorted(match.of_v1_keys):
         if key in ["ipv4_src", "ipv4_dst"]: # Special cases for masks here
             out.write("""
     count = OF_MATCH_V1_WC_%(ku)s_GET(wc);
     dst->masks.%(key)s = of_ip_index_to_mask(count);
-    /* @todo Review if we should only get the addr when masks.%(key)s != 0 */
     of_match_v1_%(key)s_get(src, &dst->fields.%(key)s);
+    /* Clear the bits not indicated by mask; IP addrs are special for 1.0 */
+    dst->fields.%(key)s &= dst->masks.%(key)s;
 """ % dict(ku=key.upper(), key=key))
         else:
             out.write("""
@@ -750,6 +757,9 @@
 """ % dict(ku=key.upper(), key=key))
 
     out.write("""
+    /* Clear values outside of masks */
+    of_match_values_mask(dst);
+
     return OF_ERROR_NONE;
 }
 """)
@@ -809,6 +819,9 @@
         rv = of_list_oxm_next(&oxm_list, &oxm_entry);
     } /* end OXM iteration */
 
+    /* Clear values outside of masks */
+    of_match_values_mask(dst);
+
     return OF_ERROR_NONE;
 }
 """)
@@ -900,7 +913,7 @@
             of_match_v%(version)d_t wire_match;
             of_match_v%(version)d_init(&wire_match,
                    %(ver_name)s, -1, 1);
-            of_object_buffer_bind((of_object_t *)&wire_match, 
+            of_object_buffer_bind((of_object_t *)&wire_match,
                 octets->data, octets->bytes, NULL);
             OF_TRY(of_match_v%(version)d_to_match(&wire_match, match));
 
@@ -958,7 +971,7 @@
     int idx;
 
     for (idx = 0; idx < OF_IPV6_BYTES; idx++) {
-        if ((v1->addr[idx] & mask->addr[idx]) != 
+        if ((v1->addr[idx] & mask->addr[idx]) !=
                (v2->addr[idx] & mask->addr[idx])) {
             return 0;
         }
@@ -977,7 +990,7 @@
     int idx;
 
     for (idx = 0; idx < OF_IPV6_BYTES; idx++) {
-        if (((v1->addr[idx] & m1->addr[idx]) & m2->addr[idx]) != 
+        if (((v1->addr[idx] & m1->addr[idx]) & m2->addr[idx]) !=
                ((v2->addr[idx] & m1->addr[idx]) & m2->addr[idx])) {
             return 0;
         }
@@ -1020,12 +1033,12 @@
  * Boolean test if two values agree when restricted to a mask
  */
 static inline int
-of_restricted_match_mac_addr(of_mac_addr_t *v1, of_mac_addr_t *v2, 
+of_restricted_match_mac_addr(of_mac_addr_t *v1, of_mac_addr_t *v2,
                              of_mac_addr_t *mask) {
     int idx;
 
     for (idx = 0; idx < OF_MAC_ADDR_BYTES; idx++) {
-        if ((v1->addr[idx] & mask->addr[idx]) != 
+        if ((v1->addr[idx] & mask->addr[idx]) !=
                (v2->addr[idx] & mask->addr[idx])) {
             return 0;
         }
@@ -1044,7 +1057,7 @@
     int idx;
 
     for (idx = 0; idx < OF_MAC_ADDR_BYTES; idx++) {
-        if (((v1->addr[idx] & m1->addr[idx]) & m2->addr[idx]) != 
+        if (((v1->addr[idx] & m1->addr[idx]) & m2->addr[idx]) !=
                ((v2->addr[idx] & m1->addr[idx]) & m2->addr[idx])) {
             return 0;
         }
@@ -1061,6 +1074,15 @@
 #define OF_OVERLAP_MAC_ADDR(v1, v2, m1, m2) \\
     of_overlap_mac_addr((v1), (v2), (m1), (m2))
 
+#define OF_MORE_SPECIFIC_BITMAP_128(v1, v2) \\
+    (OF_MORE_SPECIFIC_INT((v1)->lo, (v2)->lo) && OF_MORE_SPECIFIC_INT((v1)->hi, (v2)->hi))
+
+#define OF_RESTRICTED_MATCH_BITMAP_128(v1, v2, mask) \\
+    (OF_RESTRICTED_MATCH_INT((v1)->lo, (v2)->lo, (mask)->lo) && OF_RESTRICTED_MATCH_INT((v1)->hi, (v2)->hi, (mask)->hi))
+
+#define OF_OVERLAP_BITMAP_128(v1, v2, m1, m2) \\
+    (OF_OVERLAP_INT((v1)->lo, (v2)->lo, (m1)->lo, (m2)->lo) && OF_OVERLAP_INT((v1)->hi, (v2)->hi, (m1)->hi, (m2)->hi))
+
 /**
  * More-specific-than macro for integer types; see above
  * @return true if v1 is equal to or more specific than v2
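The new 128-bit macros apply the existing per-integer rules to the hi and lo 64-bit halves of an of_bitmap_128_t, in the same spirit as the byte-by-byte IPv6 and MAC helpers above. For the restricted-match case (two values agree wherever the mask has bits set), a pure-Python model looks like this; the hi/lo field names follow the macros, everything else is illustrative:

from collections import namedtuple

# Minimal stand-in for of_bitmap_128_t: two 64-bit halves.
Bitmap128 = namedtuple("Bitmap128", ["hi", "lo"])

def restricted_match_128(v1, v2, mask):
    """True if v1 and v2 agree on every bit selected by mask."""
    return ((v1.hi & mask.hi) == (v2.hi & mask.hi) and
            (v1.lo & mask.lo) == (v2.lo & mask.lo))

# The values below differ only outside the mask, so they still match:
# restricted_match_128(Bitmap128(0x1, 0xff00), Bitmap128(0x1, 0xffff),
#                      Bitmap128(0xff, 0xff00))  -> True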
@@ -1131,6 +1153,9 @@
         elif entry["m_type"] == "of_mac_addr_t":
             comp = "OF_MORE_SPECIFIC_MAC_ADDR"
             match_type = "OF_RESTRICTED_MATCH_MAC_ADDR"
+        elif entry["m_type"] == "of_bitmap_128_t":
+            comp = "OF_MORE_SPECIFIC_BITMAP_128"
+            match_type = "OF_RESTRICTED_MATCH_BITMAP_128"
         else: # Integer
             comp = "OF_MORE_SPECIFIC_INT"
             match_type = "OF_RESTRICTED_MATCH_INT"
@@ -1147,7 +1172,7 @@
             %(q_m)s)) {
         return 0;
     }
-""" % dict(match_type=match_type, comp=comp, q_f=q_f, e_f=e_f, 
+""" % dict(match_type=match_type, comp=comp, q_f=q_f, e_f=e_f,
            q_m=q_m, e_m=e_m, key=key))
 
     out.write("""
@@ -1185,6 +1210,8 @@
             check = "OF_OVERLAP_IPV6"
         elif entry["m_type"] == "of_mac_addr_t":
             check = "OF_OVERLAP_MAC_ADDR"
+        elif entry["m_type"] == "of_bitmap_128_t":
+            check = "OF_OVERLAP_BITMAP_128"
         else: # Integer
             check = "OF_OVERLAP_INT"
             m1 = "m1->%s" % key
@@ -1193,7 +1220,7 @@
             f2 = "f2->%s" % key
         out.write("""
     /* Check overlap for %(key)s */
-    if (!%(check)s(%(f1)s, %(f2)s, 
+    if (!%(check)s(%(f1)s, %(f2)s,
         %(m2)s, %(m1)s)) {
         return 0; /* This field differentiates; all done */
     }
diff --git a/c_gen/c_show_gen.py b/c_gen/c_show_gen.py
index 14709ab..fc3edb8 100644
--- a/c_gen/c_show_gen.py
+++ b/c_gen/c_show_gen.py
@@ -33,13 +33,14 @@
 """
 
 import sys
-import of_g
-import loxi_front_end.match as match
-import loxi_front_end.flags as flags
+import c_gen.of_g_legacy as of_g
+import c_gen.match as match
+import c_gen.flags as flags
 from generic_utils import *
-import loxi_front_end.type_maps as type_maps
+import c_gen.type_maps as type_maps
 import loxi_utils.loxi_utils as loxi_utils
-import loxi_front_end.identifiers as identifiers
+import c_gen.loxi_utils_legacy as loxi_utils
+import c_gen.identifiers as identifiers
 from c_test_gen import var_name_map
 
 def gen_obj_show_h(out, name):
@@ -49,7 +50,7 @@
  *
  * AUTOMATICALLY GENERATED FILE.  Edits will be lost on regen.
  *
- * Header file for object showing. 
+ * Header file for object showing.
  */
 
 /**
@@ -75,9 +76,9 @@
 
 
 /**
- * Show any OF object. 
+ * Show any OF object.
  */
-int of_object_show(loci_writer_f writer, void* cookie, of_object_t* obj); 
+int of_object_show(loci_writer_f writer, void* cookie, of_object_t* obj);
 
 
 
@@ -110,8 +111,8 @@
  *
  * AUTOMATICALLY GENERATED FILE.  Edits will be lost on regen.
  *
- * Source file for object showing. 
- * 
+ * Source file for object showing.
+ *
  */
 
 #define DISABLE_WARN_UNUSED_RESULT
@@ -122,9 +123,9 @@
 static int
 unknown_show(loci_writer_f writer, void* cookie, of_object_t *obj)
 {
-    return writer(cookie, "Unable to print object of type %d, version %d\\n", 
+    return writer(cookie, "Unable to print object of type %d, version %d\\n",
                          obj->object_id, obj->version);
-}    
+}
 """)
 
     for version in of_g.of_version_range:
@@ -158,7 +159,7 @@
                 m_type = member["m_type"]
                 m_name = member["name"]
                 #emitter = "LOCI_SHOW_" + loxi_utils.type_to_short_name(m_type)
-                emitter = "LOCI_SHOW_" + loxi_utils.type_to_short_name(m_type) + "_" + m_name; 
+                emitter = "LOCI_SHOW_" + loxi_utils.type_to_short_name(m_type) + "_" + m_name;
                 if loxi_utils.skip_member_name(m_name):
                     continue
                 if (loxi_utils.type_is_scalar(m_type) or
@@ -167,7 +168,7 @@
     %(cls)s_%(m_name)s_get(obj, &%(v_name)s);
     out += writer(cookie, "%(m_name)s=");
     out += %(emitter)s(writer, cookie, %(v_name)s);
-    out += writer(cookie, " "); 
+    out += writer(cookie, " ");
 """ % dict(cls=cls, m_name=m_name, m_type=m_type,
            v_name=var_name_map(m_type), emitter=emitter))
                 elif loxi_utils.class_is_list(m_type):
@@ -179,7 +180,7 @@
     %(u_type)s_ITER(&%(v_name)s, &elt, rv) {
         of_object_show(writer, cookie, (of_object_t *)&elt);
     }
-    out += writer(cookie, "} "); 
+    out += writer(cookie, "} ");
 """ % dict(sub_cls=sub_cls, u_type=sub_cls.upper(), v_name=var_name_map(m_type),
            elt_type=elt_type, cls=cls, m_name=m_name, m_type=m_type))
                 else:
@@ -187,7 +188,7 @@
                     out.write("""
     %(cls)s_%(m_name)s_bind(obj, &%(v_name)s);
     out += %(sub_cls)s_%(ver_name)s_show(writer, cookie, &%(v_name)s);
-""" % dict(cls=cls, sub_cls=sub_cls, m_name=m_name, 
+""" % dict(cls=cls, sub_cls=sub_cls, m_name=m_name,
            v_name=var_name_map(m_type), ver_name=ver_name))
 
             out.write("""
@@ -207,12 +208,12 @@
     for key, entry in match.of_match_members.items():
         m_type = entry["m_type"]
         #emitter = "LOCI_SHOW_" + loxi_utils.type_to_short_name(m_type)
-        emitter = "LOCI_SHOW_" + loxi_utils.type_to_short_name(m_type) + "_" + key; 
+        emitter = "LOCI_SHOW_" + loxi_utils.type_to_short_name(m_type) + "_" + key;
         out.write("""
     if (OF_MATCH_MASK_%(ku)s_ACTIVE_TEST(match)) {
-        out += writer(cookie, "%(key)s active="); 
+        out += writer(cookie, "%(key)s active=");
         out += %(emitter)s(writer, cookie, match->fields.%(key)s);
-        out += writer(cookie, "/"); 
+        out += writer(cookie, "/");
         out += %(emitter)s(writer, cookie, match->masks.%(key)s);
         out += writer(cookie, " ");
     }
@@ -234,11 +235,11 @@
             if j < len(of_g.all_class_order) - 1: # Avoid ultimate comma
                 comma = ","
 
-            if (not loxi_utils.class_in_version(cls, version) or 
+            if (not loxi_utils.class_in_version(cls, version) or
                     cls in type_maps.inheritance_map):
                 out.write("    unknown_show%s\n" % comma);
             else:
-                out.write("    %s_%s_show%s\n" % 
+                out.write("    %s_%s_show%s\n" %
                           (cls, loxi_utils.version_to_name(version), comma))
         out.write("};\n\n")
 
diff --git a/c_gen/c_test_gen.py b/c_gen/c_test_gen.py
index 97cc78d..cf03e0e 100644
--- a/c_gen/c_test_gen.py
+++ b/c_gen/c_test_gen.py
@@ -54,18 +54,20 @@
 Verify that the members all have the appropriate value
 
 Throughout, checking the consistency of memory and memory operations
-is done with mcheck (not supported on Mac OS X).
+is done with mcheck (if available).
 
 """
 
 import sys
-import of_g
-import loxi_front_end.match as match
-import loxi_front_end.flags as flags
+import c_gen.of_g_legacy as of_g
+import c_gen.match as match
+import c_gen.flags as flags
 from generic_utils import *
-import loxi_front_end.type_maps as type_maps
-import loxi_utils.loxi_utils as loxi_utils
-import loxi_front_end.identifiers as identifiers
+import c_gen.type_maps as type_maps
+import c_gen.loxi_utils_legacy as loxi_utils
+import c_gen.identifiers as identifiers
+import util
+import test_data
 
 def var_name_map(m_type):
     """
@@ -79,20 +81,26 @@
         uint16_t="val16",
         uint32_t="val32",
         uint64_t="val64",
+        of_ipv4_t="ipv4",
         of_port_no_t="port_no",
         of_fm_cmd_t="fm_cmd",
         of_wc_bmap_t="wc_bmap",
         of_match_bmap_t = "match_bmap",
-        of_port_name_t="port_name", 
+        of_port_name_t="port_name",
         of_table_name_t="table_name",
         of_desc_str_t="desc_str",
-        of_serial_num_t="ser_num", 
-        of_mac_addr_t="mac_addr", 
+        of_serial_num_t="ser_num",
+        of_mac_addr_t="mac_addr",
         of_ipv6_t="ipv6",
         # Non-scalars; more TBD
         of_octets_t="octets",
         of_meter_features_t="features",
-        of_match_t="match")
+        of_match_t="match",
+        # BSN extensions
+        of_bsn_vport_q_in_q_t="vport",
+        of_bitmap_128_t="bitmap_128",
+        of_checksum_128_t="checksum_128",
+        )
 
     if m_type.find("of_list_") == 0:
         return "list"
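var_name_map only picks the local C variable name the generated tests use for a member type, with every of_list_* type collapsing to "list". A trimmed stand-alone copy of that lookup, limited to a few of the entries added above:

# Trimmed illustration of the lookup behaviour (the real table is the
# var_name_map dict above).
_VAR_NAMES = {
    "of_ipv4_t": "ipv4",
    "of_bitmap_128_t": "bitmap_128",
    "of_checksum_128_t": "checksum_128",
}

def test_var_name(m_type):
    """C variable name used by the generated unit tests for m_type."""
    if m_type.startswith("of_list_"):
        return "list"
    return _VAR_NAMES.get(m_type, "unknown")  # fallback is assumed here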
@@ -102,10 +110,10 @@
 
 integer_types = ["uint8_t", "uint16_t", "uint32_t", "uint64_t",
                  "of_port_no_t", "of_fm_cmd_t", "of_wc_bmap_t",
-                 "of_match_bmap_t"]
+                 "of_match_bmap_t", "of_ipv4_t"]
 string_types = [ "of_port_name_t", "of_table_name_t",
-                "of_desc_str_t", "of_serial_num_t", "of_mac_addr_t", 
-                "of_ipv6_t"]
+                "of_desc_str_t", "of_serial_num_t", "of_mac_addr_t",
+                "of_ipv6_t", "of_bitmap_128_t", "of_checksum_128_t"]
 
 scalar_types = integer_types[:]
 scalar_types.extend(string_types)
@@ -121,6 +129,35 @@
             m_name == "experimenter" or
             m_name == "subtype")):
         return True
+
+    classes = ["of_bsn_lacp_stats_request",
+               "of_bsn_lacp_stats_reply",
+               "of_bsn_switch_pipeline_stats_request",
+               "of_bsn_switch_pipeline_stats_reply",
+               "of_bsn_port_counter_stats_request",
+               "of_bsn_port_counter_stats_reply",
+               "of_bsn_vlan_counter_stats_request",
+               "of_bsn_vlan_counter_stats_reply",
+               "of_bsn_gentable_entry_desc_stats_request",
+               "of_bsn_gentable_entry_desc_stats_reply",
+               "of_bsn_gentable_entry_stats_request",
+               "of_bsn_gentable_entry_stats_reply",
+               "of_bsn_gentable_desc_stats_request",
+               "of_bsn_gentable_desc_stats_reply",
+               "of_bsn_gentable_stats_request",
+               "of_bsn_gentable_stats_reply",
+               "of_bsn_gentable_bucket_stats_request",
+               "of_bsn_gentable_bucket_stats_reply",
+               "of_bsn_flow_checksum_bucket_stats_request",
+               "of_bsn_flow_checksum_bucket_stats_reply",
+               "of_bsn_table_checksum_stats_request",
+               "of_bsn_table_checksum_stats_reply",
+            ]
+
+    if (cls in classes and (
+            m_name == "experimenter" or
+            m_name == "subtype")):
+        return True
     return loxi_utils.skip_member_name(m_name) or m_type not in scalar_types
 
 def gen_fill_string(out):
@@ -240,7 +277,7 @@
 
     for key, entry in match.of_match_members.items():
         out.write("""
-    if (!(of_match_incompat[version] & 
+    if (!(of_match_incompat[version] &
             OF_OXM_BIT(OF_OXM_INDEX_%(ku)s))) {
         OF_MATCH_MASK_%(ku)s_EXACT_SET(match);
         VAR_%(u_type)s_INIT(match->fields.%(key)s, value);
@@ -255,6 +292,9 @@
         match->masks.ipv4_src = 0xffff0000;
         match->masks.ipv4_dst = 0xfffff800;
     }
+
+    /* Restrict values according to masks */
+    of_match_values_mask(match);
     return value;
 }
 
@@ -337,6 +377,7 @@
 extern int run_list_limits_tests(void);
 
 extern int test_ext_objs(void);
+extern int test_datafiles(void);
 
 """)
 
@@ -362,7 +403,7 @@
  * Declarations for list population and check primitives
  */
 """)
- 
+
     for version in of_g.of_version_range:
         for cls in of_g.ordered_list_objects:
             if cls in type_maps.inheritance_map:
@@ -398,10 +439,11 @@
 #include <locitest/unittest.h>
 #include <locitest/test_common.h>
 
-#if !defined(__APPLE__)
+/* mcheck is a glibc extension */
+#if defined(__linux__)
 #include <mcheck.h>
 #define MCHECK_INIT mcheck(NULL)
-#else /* mcheck not available under OS X */
+#else
 #define MCHECK_INIT do { } while (0)
 #endif
 
@@ -411,7 +453,7 @@
 int exit_on_error = 1;
 
 /**
- * Global error state: 0 is okay, 1 is error 
+ * Global error state: 0 is okay, 1 is error
  */
 int global_error = 0;
 
@@ -501,14 +543,14 @@
                 out.write("    RUN_TEST(%s_scalar);\n" % test_name)
 
     out.write("    return TEST_PASS;\n}\n");
-    
+
 def message_scalar_test(out, version, cls):
     """
     Generate one test case for the given version and class
     """
 
     members, member_types = scalar_member_types_get(cls, version)
-    length = of_g.base_length[(cls, version)]
+    length = of_g.base_length[(cls, version)] + of_g.extra_length.get((cls, version), 0)
     v_name = loxi_utils.version_to_name(version)
 
     out.write("""
@@ -523,7 +565,7 @@
     TEST_ASSERT(obj->length == %(length)d);
     TEST_ASSERT(obj->parent == NULL);
     TEST_ASSERT(obj->object_id == %(u_cls)s);
-""" % dict(cls=cls, u_cls=cls.upper(), 
+""" % dict(cls=cls, u_cls=cls.upper(),
            v_name=v_name, length=length, version=version))
     if not type_maps.class_is_virtual(cls):
         out.write("""
@@ -539,7 +581,7 @@
 
     /* Check values just set */
     TEST_ASSERT(%(cls)s_%(v_name)s_check_scalars(obj, 1) != 0);
-""" % dict(cls=cls, u_cls=cls.upper(), 
+""" % dict(cls=cls, u_cls=cls.upper(),
            v_name=v_name, length=length, version=version))
 
     out.write("""
@@ -566,7 +608,7 @@
     for member in members:
         m_type = member["m_type"]
         m_name = member["name"]
-        if (not loxi_utils.type_is_scalar(m_type) or 
+        if (not loxi_utils.type_is_scalar(m_type) or
             ignore_member(cls, version, m_name, m_type)):
             continue
         if not m_type in member_types:
@@ -580,8 +622,8 @@
     """
     out.write("""
 /**
- * Populate the scalar values in obj of type %(cls)s, 
- * version %(v_name)s 
+ * Populate the scalar values in obj of type %(cls)s,
+ * version %(v_name)s
  * @param obj Pointer to an object to populate
  * @param value The seed value to use in populating the object
  * @returns The value after increments for this object's values
@@ -608,11 +650,11 @@
     return value;
 }
 """)
-    
+
     out.write("""
 /**
- * Check scalar values in obj of type %(cls)s, 
- * version %(v_name)s 
+ * Check scalar values in obj of type %(cls)s,
+ * version %(v_name)s
  * @param obj Pointer to an object to check
  * @param value Starting value for checking
  * @returns The value after increments for this object's values
@@ -644,7 +686,7 @@
 
 def gen_scalar_set_check_funs(out):
     """
-    For each object class with scalar members, generate functions that 
+    For each object class with scalar members, generate functions that
     set and check their values
     """
     for version in of_g.of_version_range:
@@ -658,7 +700,7 @@
     base_type = loxi_utils.list_to_entry_type(cls)
     setup_template = """
     %(subcls)s_init(%(inst)s, %(v_name)s, -1, 1);
-    %(cls)s_append_bind(list, 
+    %(cls)s_append_bind(list,
             (%(base_type)s_t *)%(inst)s);
     value = %(subcls)s_%(v_name)s_populate(
         %(inst)s, value);
@@ -670,8 +712,8 @@
 """ % subcls)
     for i in range(2):
         out.write(setup_template %
-                  dict(inst=instance, subcls=subcls, v_name=v_name, 
-                       base_type=base_type, cls=cls, inst_len=inst_len, 
+                  dict(inst=instance, subcls=subcls, v_name=v_name,
+                       base_type=base_type, cls=cls, inst_len=inst_len,
                        version=version))
 
 def check_instance(out, cls, subcls, instance, v_name, inst_len, version, last):
@@ -695,7 +737,7 @@
 """
     out.write("\n    /* Check two instances of type %s */" % instance)
 
-    out.write(check_template % 
+    out.write(check_template %
               dict(elt_name=loxi_utils.enum_name(subcls), inst_len=inst_len,
                    inst=instance, subcls=subcls,
                    v_name=loxi_utils.version_to_name(version)))
@@ -703,7 +745,7 @@
     TEST_OK(%(cls)s_next(list, &elt));
 """ % dict(cls=cls))
 
-    out.write(check_template % 
+    out.write(check_template %
               dict(elt_name=loxi_utils.enum_name(subcls), inst_len=inst_len,
                    inst=instance, subcls=subcls,
                    v_name=loxi_utils.version_to_name(version)))
@@ -735,8 +777,9 @@
     %(base_type)s_t elt;
     int cur_len = 0;
 """ % dict(cls=cls, base_type=base_type))
-    
+
     sub_classes =  type_maps.sub_class_map(base_type, version)
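+    # Skip virtual subclasses; they cannot be instantiated in the generated tests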
+    sub_classes = [(instance, subcls) for (instance, subcls) in sub_classes if not type_maps.class_is_virtual(subcls)]
     v_name = loxi_utils.version_to_name(version)
 
     if len(sub_classes) == 0:
@@ -770,7 +813,7 @@
     """
     out.write("""
 /**
- * Check a list of type %(cls)s generated by 
+ * Check a list of type %(cls)s generated by
  * list_setup_%(cls)s_%(v_name)s
  */
 int
@@ -782,8 +825,9 @@
     out.write("""
     %(base_type)s_t elt;
 """ % dict(cls=cls, base_type=base_type))
-    
+
     sub_classes =  type_maps.sub_class_map(base_type, version)
+    sub_classes = [(instance, subcls) for (instance, subcls) in sub_classes if not type_maps.class_is_virtual(subcls)]
     v_name = loxi_utils.version_to_name(version)
 
     if len(sub_classes) == 0:
@@ -804,7 +848,7 @@
             inst_len = -1
         else:
             inst_len = loxi_utils.base_type_to_length(base_type, version)
-        check_instance(out, cls, base_type, "elt_p", v_name, inst_len, 
+        check_instance(out, cls, base_type, "elt_p", v_name, inst_len,
                        version, True)
     else:
         count = 0
@@ -814,7 +858,7 @@
                 inst_len = -1
             else:
                 inst_len = of_g.base_length[(subcls, version)]
-            check_instance(out, cls, subcls, instance, v_name, inst_len, 
+            check_instance(out, cls, subcls, instance, v_name, inst_len,
                            version, count==len(sub_classes))
 
     out.write("""
@@ -856,7 +900,7 @@
 
     value = list_setup_%(cls)s_%(v_name)s(list, value);
     TEST_ASSERT(value != 0);
-""" % dict(cls=cls, base_type=base_type, v_name=loxi_utils.version_to_name(version), 
+""" % dict(cls=cls, base_type=base_type, v_name=loxi_utils.version_to_name(version),
            enum_cls=loxi_utils.enum_name(cls)))
 
     out.write("""
@@ -890,7 +934,7 @@
 
 #include <locitest/test_common.h>
 """)
-    
+
     for version in of_g.of_version_range:
         v_name = loxi_utils.version_to_name(version)
         out.write("""
@@ -1034,9 +1078,9 @@
         out.write("""
     /* Serialize to version %(v_name)s */
     TEST_ASSERT((value = of_match_populate(&match1, %(v_name)s, value)) > 0);
-    TEST_ASSERT(of_match_serialize(%(v_name)s, &match1, &octets) == 
+    TEST_ASSERT(of_match_serialize(%(v_name)s, &match1, &octets) ==
         OF_ERROR_NONE);
-    TEST_ASSERT(of_match_deserialize(%(v_name)s, &match2, &octets) == 
+    TEST_ASSERT(of_match_deserialize(%(v_name)s, &match2, &octets) ==
         OF_ERROR_NONE);
     TEST_ASSERT(memcmp(&match1, &match2, sizeof(match1)) == 0);
     FREE(octets.data);
@@ -1075,7 +1119,9 @@
         for cls in of_g.ordered_messages:
             if not (cls, version) in of_g.base_length:
                 continue
-            bytes = of_g.base_length[(cls, version)]
+            if type_maps.class_is_virtual(cls):
+                continue
+            bytes = of_g.base_length[(cls, version)] + of_g.extra_length.get((cls, version), 0)
             out.write("""
 static int
 test_%(cls)s_create_%(v_name)s(void)
@@ -1128,11 +1174,13 @@
         for cls in of_g.ordered_messages:
             if not (cls, version) in of_g.base_length:
                 continue
+            if type_maps.class_is_virtual(cls):
+                continue
             test_name = "%s_create_%s" % (cls, loxi_utils.version_to_name(version))
             out.write("    RUN_TEST(%s);\n" % test_name)
 
     out.write("\n    return TEST_PASS;\n}\n");
-        
+
 
 def gen_list_setup_check(out, cls, version):
     """
@@ -1156,8 +1204,9 @@
     %(base_type)s_t elt;
     int cur_len = 0;
 """ % dict(cls=cls, base_type=base_type))
-    
+
     sub_classes =  type_maps.sub_class_map(base_type, version)
+    sub_classes = [(instance, subcls) for (instance, subcls) in sub_classes if not type_maps.class_is_virtual(subcls)]
     v_name = loxi_utils.version_to_name(version)
 
     if len(sub_classes) == 0:
@@ -1188,7 +1237,7 @@
     else:
         for instance, subcls in sub_classes:
             inst_len = of_g.base_length[(subcls, version)]
-            setup_instance(out, cls, subcls, instance, v_name, 
+            setup_instance(out, cls, subcls, instance, v_name,
                            inst_len, version)
     out.write("""
     return value;
@@ -1196,7 +1245,7 @@
 """)
     out.write("""
 /**
- * Check a list of type %(cls)s generated by 
+ * Check a list of type %(cls)s generated by
  * %(cls)s_%(v_name)s_populate
  * @param list Pointer to the list that was populated
  * @param value Starting value for checking
@@ -1213,9 +1262,10 @@
     int count = 0;
     int rv;
 """ % dict(cls=cls, base_type=base_type))
-    
+
 
     sub_classes =  type_maps.sub_class_map(base_type, version)
+    sub_classes = [(instance, subcls) for (instance, subcls) in sub_classes if not type_maps.class_is_virtual(subcls)]
     v_name = loxi_utils.version_to_name(version)
 
     if len(sub_classes) == 0:
@@ -1238,7 +1288,7 @@
             inst_len = -1
         else:
             inst_len = loxi_utils.base_type_to_length(base_type, version)
-        check_instance(out, cls, base_type, "elt_p", v_name, inst_len, 
+        check_instance(out, cls, base_type, "elt_p", v_name, inst_len,
                        version, True)
     else:
         count = 0
@@ -1248,7 +1298,7 @@
                 inst_len = -1
             else:
                 inst_len = of_g.base_length[(subcls, version)]
-            check_instance(out, cls, subcls, instance, v_name, inst_len, 
+            check_instance(out, cls, subcls, instance, v_name, inst_len,
                            version, count==len(sub_classes))
     out.write("""
 """ % dict(base_type=base_type))
@@ -1332,7 +1382,7 @@
         /* Test bind */
         %(cls)s_%(m_name)s_bind(obj, &sub_cls);
     }
-""" % dict(var_name=var_name_map(m_type), cls=cls, 
+""" % dict(var_name=var_name_map(m_type), cls=cls,
            m_name=m_name, sub_cls=sub_cls,
            v_name=loxi_utils.version_to_name(version)))
 
@@ -1355,7 +1405,7 @@
     TEST_ASSERT(value != 0);
     %(cls)s_%(m_name)s_set(
         obj, &%(var_name)s);
-""" % dict(cls=cls, var_name=var_name_map(m_type), 
+""" % dict(cls=cls, var_name=var_name_map(m_type),
            m_name=m_name, v_name=loxi_utils.version_to_name(version)))
         elif m_type == "of_octets_t":
             out.write("""\
@@ -1484,7 +1534,7 @@
     """
 
     members, member_types = scalar_member_types_get(cls, version)
-    length = of_g.base_length[(cls, version)]
+    length = of_g.base_length[(cls, version)] + of_g.extra_length.get((cls, version), 0)
     v_name = loxi_utils.version_to_name(version)
 
     out.write("""
@@ -1498,7 +1548,7 @@
     TEST_ASSERT(obj->length == %(length)d);
     TEST_ASSERT(obj->parent == NULL);
     TEST_ASSERT(obj->object_id == %(u_cls)s);
-""" % dict(cls=cls, u_cls=cls.upper(), 
+""" % dict(cls=cls, u_cls=cls.upper(),
            v_name=v_name, length=length, version=version))
     if (not type_maps.class_is_virtual(cls)) or loxi_utils.class_is_list(cls):
         out.write("""
@@ -1522,7 +1572,7 @@
     /* Check values just set */
     TEST_ASSERT(%(cls)s_%(v_name)s_check(
         obj, 1) != 0);
-""" % dict(cls=cls, u_cls=cls.upper(), 
+""" % dict(cls=cls, u_cls=cls.upper(),
            v_name=v_name, length=length, version=version))
 
     out.write("""
@@ -1602,7 +1652,7 @@
     elt_type = loxi_utils.list_to_entry_type(cls)
     out.write("""
 /**
- * Duplicate a list of type %(cls)s 
+ * Duplicate a list of type %(cls)s
  * using accessor functions
  * @param src Pointer to object to be duplicated
  * @returns A new object of type %(cls)s.
@@ -1643,7 +1693,7 @@
     ver_name = loxi_utils.version_to_name(version)
     out.write("""
 /**
- * Duplicate a super class object of type %(cls)s 
+ * Duplicate a super class object of type %(cls)s
  * @param src Pointer to object to be duplicated
  * @returns A new object of type %(cls)s.
  *
@@ -1680,7 +1730,7 @@
 
     out.write("""
 /**
- * Duplicate an object of type %(cls)s 
+ * Duplicate an object of type %(cls)s
  * using accessor functions
  * @param src Pointer to object to be duplicated
  * @returns A new object of type %(cls)s.
@@ -1741,7 +1791,7 @@
     }
     %(cls)s_%(m_name)s_set(dst, dst_%(v_name)s);
     %(sub_cls)s_delete(dst_%(v_name)s);
-""" % dict(sub_cls=sub_cls, cls=cls, m_name=m_name, 
+""" % dict(sub_cls=sub_cls, cls=cls, m_name=m_name,
            v_name=var_name_map(m_type), ver_name=ver_name))
 
     out.write("""
@@ -1902,7 +1952,7 @@
     of_object_dump((loci_writer_f)fprintf, out, obj);
     of_object_delete(obj);
 """ % dict(cls=cls, version=of_g.of_version_wire2name[version]))
-    
+
     out.write("""
     fclose(out);
     return TEST_PASS;
@@ -1962,3 +2012,16 @@
 }
 """)
 
+def gen_datafiles_tests(out, name):
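+    """
+    Generate C unit tests from the test data files that include a 'c' section
+    @param out The file handle to write to
+    """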
+    tests = []
+    for filename in test_data.list_files():
+        data = test_data.read(filename)
+        if not 'c' in data:
+            continue
+        name = filename[:-5].replace("/", "_")
+        tests.append(dict(name=name,
+                          filename=filename,
+                          c=data['c'],
+                          binary=data['binary']))
+
+    util.render_template(out, "test_data.c", tests=tests)
diff --git a/c_gen/c_type_maps.py b/c_gen/c_type_maps.py
index d790c85..d4525ba 100644
--- a/c_gen/c_type_maps.py
+++ b/c_gen/c_type_maps.py
@@ -29,139 +29,16 @@
 # @brief C code generation for LOXI type related maps
 #
 
-import of_g
+import c_gen.of_g_legacy as of_g
 import sys
 from generic_utils import *
-import loxi_front_end.oxm as oxm
-import loxi_front_end.type_maps as type_maps
+import c_gen.type_maps as type_maps
 
 
 # Some number larger than small type values, but less than
 # reserved values like 0xffff
 max_type_value = 1000
 
-def gen_object_id_to_type(out):
-    out.write("""
-/**
- * Map from object ID to primary wire type
- *
- * For messages, this is the header type; in particular for stats, this is
- * the common stats request/response type.  For per-stats types, use the
- * stats type map.  For things like actions, instructions or queue-props,
- * this gives the "sub type".
- */
-""")
-    for version in of_g.of_version_range:
-        out.write("static const int\nof_object_to_type_map_v%d[OF_OBJECT_COUNT] = {\n"
-                  %version)
-        out.write("    -1, /* of_object, not a valid specific type */\n")
-        for j, cls in enumerate(of_g.all_class_order):
-            comma = ""
-            if j < len(of_g.all_class_order) - 1: # Avoid ultimate comma
-                comma = ","
-
-            if cls in type_maps.stats_reply_list:
-                out.write("    %d%s /* %s */\n" % 
-                          (type_maps.type_val[("of_stats_reply", version)],
-                           comma, cls))
-            elif cls in type_maps.stats_request_list:
-                out.write("    %d%s /* %s */\n" % 
-                          (type_maps.type_val[("of_stats_request", version)],
-                           comma, cls))
-            elif cls in type_maps.flow_mod_list:
-                out.write("    %d%s /* %s */\n" % 
-                          (type_maps.type_val[("of_flow_mod", version)],
-                           comma, cls))
-            elif (cls, version) in type_maps.type_val:
-                out.write("    %d%s /* %s */\n" % 
-                          (type_maps.type_val[(cls, version)], comma, cls))
-            elif type_maps.message_is_extension(cls, version):
-                out.write("    %d%s /* %s */\n" % 
-                          (type_maps.type_val[("of_experimenter", version)],
-                           comma, cls))
-            elif type_maps.action_is_extension(cls, version):
-                out.write("    %d%s /* %s */\n" % 
-                          (type_maps.type_val[("of_action_experimenter",
-                                               version)],
-                           comma, cls))
-            elif type_maps.action_id_is_extension(cls, version):
-                out.write("    %d%s /* %s */\n" % 
-                          (type_maps.type_val[("of_action_id_experimenter",
-                                               version)],
-                           comma, cls))
-            elif type_maps.instruction_is_extension(cls, version):
-                out.write("    %d%s /* %s */\n" % 
-                          (type_maps.type_val[("of_instruction_experimenter",
-                                               version)],
-                           comma, cls))
-            elif type_maps.queue_prop_is_extension(cls, version):
-                out.write("    %d%s /* %s */\n" % 
-                          (type_maps.type_val[("of_queue_prop_experimenter",
-                                               version)],
-                           comma, cls))
-            elif type_maps.table_feature_prop_is_extension(cls, version):
-                out.write("    %d%s /* %s */\n" % 
-                    (type_maps.type_val[("of_table_feature_prop_experimenter",
-                                         version)],
-                     comma, cls))
-            else:
-                out.write("    -1%s /* %s (invalid) */\n" % (comma, cls))
-        out.write("};\n\n")
-
-    out.write("""
-/**
- * Unified map, indexed by wire version which is 1-based.
- */
-const int *const of_object_to_type_map[OF_VERSION_ARRAY_MAX] = {
-    NULL,
-""")
-    for version in of_g.of_version_range:
-        out.write("    of_object_to_type_map_v%d,\n" % version)
-    out.write("""
-};
-""")
-
-def gen_object_id_to_extension_data(out):
-    out.write("""
-/**
- * Extension data.
- * @fixme There must be a better way to represent this data
- */
-""")
-    for version in of_g.of_version_range:
-        out.write("""
-static const of_experimenter_data_t
-of_object_to_extension_data_v%d[OF_OBJECT_COUNT] = {
-""" % version)
-        out.write("    {0, 0, 0}, /* of_object, not a valid specific type */\n")
-        for j, cls in enumerate(of_g.all_class_order):
-            comma = ""
-            if j < len(of_g.all_class_order) - 1: # Avoid ultimate comma
-                comma = ","
-
-            if type_maps.class_is_extension(cls, version):
-                exp_name = type_maps.extension_to_experimenter_macro_name(cls)
-                subtype = type_maps.extension_to_subtype(cls, version)
-                out.write("    {1, %s, %d}%s /* %s */\n" % 
-                          (exp_name, subtype, comma, cls))
-            else:
-                out.write("    {0, 0, 0}%s /* %s (non-extension) */\n" %
-                          (comma, cls))
-        out.write("};\n\n")
-
-    out.write("""
-/**
- * Unified map, indexed by wire version which is 1-based.
- */
-const of_experimenter_data_t *const of_object_to_extension_data[OF_VERSION_ARRAY_MAX] = {
-    NULL,
-""")
-    for version in of_g.of_version_range:
-        out.write("    of_object_to_extension_data_v%d,\n" % version)
-    out.write("""
-};
-""")
-
 def gen_type_to_object_id(out, type_str, prefix, template,
                           value_array, max_val):
     """
@@ -219,7 +96,7 @@
     out.write("""
 };
 
-""" % dict(name=type_str, u_name=type_str.upper(), 
+""" % dict(name=type_str, u_name=type_str.upper(),
            max_val=max_val, c_name=prefix.lower()))
 
 def gen_type_maps(out):
@@ -228,9 +105,10 @@
     @param out The file handle to write to
     """
 
-    out.write("#include <loci/loci.h>\n\n")
-
     # Generate maps from wire type values to object IDs
+    gen_type_to_object_id(out, "error_msg_type_to_id", "OF_ERROR_MSG",
+                          "OF_%s_ERROR_MSG", type_maps.error_types,
+                          max_type_value)
     gen_type_to_object_id(out, "action_type_to_id", "OF_ACTION",
                           "OF_ACTION_%s", type_maps.action_types,
                           max_type_value)
@@ -238,7 +116,7 @@
                           "OF_ACTION_ID_%s", type_maps.action_id_types,
                           max_type_value)
     gen_type_to_object_id(out, "instruction_type_to_id", "OF_INSTRUCTION",
-                          "OF_INSTRUCTION_%s", type_maps.instruction_types, 
+                          "OF_INSTRUCTION_%s", type_maps.instruction_types,
                           max_type_value)
     gen_type_to_object_id(out, "queue_prop_type_to_id", "OF_QUEUE_PROP",
                           "OF_QUEUE_PROP_%s", type_maps.queue_prop_types,
@@ -254,6 +132,9 @@
     gen_type_to_object_id(out, "hello_elem_type_to_id", "OF_HELLO_ELEM",
                           "OF_HELLO_ELEM_%s", type_maps.hello_elem_types,
                           max_type_value)
+    gen_type_to_object_id(out, "group_mod_type_to_id", "OF_GROUP_MOD",
+                          "OF_GROUP_%s", type_maps.group_mod_types,
+                          max_type_value)
 
     # FIXME:  Multipart re-organization
     gen_type_to_object_id(out, "stats_request_type_to_id", "OF_STATS_REQUEST",
@@ -270,15 +151,13 @@
     gen_type_to_object_id(out, "message_type_to_id", "OF_MESSAGE",
                           "OF_%s", type_maps.message_types, max_type_value)
 
-    gen_object_id_to_type(out)
-    gen_object_id_to_extension_data(out)
-    # Don't need array mapping ID to stats types right now; handled directly
-    # gen_object_id_to_stats_type(out)
-
+    gen_type_to_object_id(out, "bsn_tlv_type_to_id", "OF_BSN_TLV",
+                          "OF_BSN_TLV_%s", type_maps.bsn_tlv_types,
+                          max_type_value)
 
 def gen_type_to_obj_map_functions(out):
     """
-    Generate the templated static inline type map functions
+    Generate the templated type map functions
     @param out The file handle to write to
     """
 
@@ -301,10 +180,10 @@
  * @param version The version associated with the check
  * @return The %(name)s OF object type
  * @return OF_OBJECT_INVALID if type does not map to an object
- * 
+ *
  */
-static inline of_object_id_t
-of_%(name)s_to_object_id(int %(name)s, of_version_t version) 
+of_object_id_t
+of_%(name)s_to_object_id(int %(name)s, of_version_t version)
 {
     if (!OF_VERSION_OKAY(version)) {
         return OF_OBJECT_INVALID;
@@ -332,10 +211,10 @@
  * @param version The version associated with the check
  * @return The %(name)s OF object type
  * @return OF_OBJECT_INVALID if type does not map to an object
- * 
+ *
  */
-static inline of_object_id_t
-of_%(name)s_to_object_id(int %(name)s, of_version_t version) 
+of_object_id_t
+of_%(name)s_to_object_id(int %(name)s, of_version_t version)
 {
     if (!OF_VERSION_OKAY(version)) {
         return OF_OBJECT_INVALID;
@@ -367,10 +246,10 @@
  * @param version The version associated with the check
  * @return The %(name)s OF object type
  * @return OF_OBJECT_INVALID if type does not map to an object
- * 
+ *
  */
-static inline of_object_id_t
-of_%(name)s_to_object_id(int %(name)s, of_version_t version) 
+of_object_id_t
+of_%(name)s_to_object_id(int %(name)s, of_version_t version)
 {
     if (!OF_VERSION_OKAY(version)) {
         return OF_OBJECT_INVALID;
@@ -385,6 +264,42 @@
     return of_%(name)s_type_to_id[version][%(name)s];
 }
 """
+
+    error_msg_template = """
+/**
+ * %(name)s wire type to object ID array.
+ * Treat as private; use function accessor below
+ */
+
+extern const of_object_id_t *const of_%(name)s_type_to_id[OF_VERSION_ARRAY_MAX];
+
+#define OF_%(u_name)s_ITEM_COUNT %(ar_len)d\n
+
+/**
+ * Map an %(name)s wire value to an OF object
+ * @param %(name)s The %(name)s type wire value
+ * @param version The version associated with the check
+ * @return The %(name)s OF object type
+ * @return OF_OBJECT_INVALID if type does not map to an object
+ *
+ */
+of_object_id_t
+of_error_msg_to_object_id(uint16_t %(name)s, of_version_t version)
+{
+    if (!OF_VERSION_OKAY(version)) {
+        return OF_OBJECT_INVALID;
+    }
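+    /* Experimenter error messages are not in the type-to-id table; handle them explicitly */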
+    if (%(name)s == OF_EXPERIMENTER_TYPE) {
+        return OF_EXPERIMENTER_ERROR_MSG;
+    }
+    if (%(name)s < 0 || %(name)s >= OF_%(u_name)s_ITEM_COUNT) {
+        return OF_OBJECT_INVALID;
+    }
+
+    return of_%(name)s_type_to_id[version][%(name)s];
+}
+"""
+
     # Experimenter mapping functions
     # Currently we support very few candidates, so we just do a
     # list of if/elses
@@ -402,7 +317,7 @@
  * @todo put OF_EXPERIMENTER_<name> in loci_base.h
  */
 
-static inline of_object_id_t
+of_object_id_t
 of_message_experimenter_to_object_id(of_message_t msg, of_version_t version) {
     uint32_t experimenter_id;
     uint32_t subtype;
@@ -417,7 +332,7 @@
     for version, experimenter_lists in type_maps.extension_message_subtype.items():
         for exp, subtypes in experimenter_lists.items():
             experimenter_function += """
-    if ((experimenter_id == OF_EXPERIMENTER_ID_%(exp_name)s) && 
+    if ((experimenter_id == OF_EXPERIMENTER_ID_%(exp_name)s) &&
             (version == %(ver_name)s)) {
 """ % dict(exp_name=exp.upper(), ver_name=of_g.wire_ver_map[version])
             for ext_msg, subtype in subtypes.items():
@@ -452,13 +367,16 @@
  * @returns object ID or OF_OBJECT_INVALID if parse error
  */
 
-static inline of_object_id_t
+of_object_id_t
 of_message_to_object_id(of_message_t msg, int length) {
     uint8_t type;
     of_version_t ver;
     of_object_id_t obj_id;
     uint16_t stats_type;
+    uint16_t err_type;
     uint8_t flow_mod_cmd;
+    uint32_t experimenter, subtype;
+    uint16_t group_mod_cmd;
 
     if (length < OF_MESSAGE_MIN_LENGTH) {
         return OF_OBJECT_INVALID;
@@ -497,416 +415,188 @@
             return OF_OBJECT_INVALID;
         }
         stats_type = of_message_stats_type_get(msg);
-        if (obj_id == OF_STATS_REQUEST) {
-            obj_id = of_stats_request_to_object_id(stats_type, ver);
+        if (stats_type == OF_STATS_TYPE_EXPERIMENTER) {
+            if (length < OF_MESSAGE_STATS_EXPERIMENTER_MIN_LENGTH) {
+                return OF_OBJECT_INVALID;
+            }
+            experimenter = of_message_stats_experimenter_id_get(msg);
+            subtype = of_message_stats_experimenter_subtype_get(msg);
+            if (obj_id == OF_STATS_REQUEST) {
+                obj_id = of_experimenter_stats_request_to_object_id(experimenter, subtype, ver);
+            } else {
+                obj_id = of_experimenter_stats_reply_to_object_id(experimenter, subtype, ver);
+            }
         } else {
-            obj_id = of_stats_reply_to_object_id(stats_type, ver);
+            if (obj_id == OF_STATS_REQUEST) {
+                obj_id = of_stats_request_to_object_id(stats_type, ver);
+            } else {
+                obj_id = of_stats_reply_to_object_id(stats_type, ver);
+            }
         }
     }
 
+    if (obj_id == OF_ERROR_MSG) {
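+        /* Error messages are further subtyped by the 16-bit error type in the header */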
+        if (length < OF_MESSAGE_MIN_ERROR_LENGTH) {
+            return OF_OBJECT_INVALID;
+        }
+        err_type = of_message_error_type_get(msg);
+        obj_id = of_error_msg_to_object_id(err_type, ver);
+    }
+
+    if (obj_id == OF_GROUP_MOD) {
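+        /* Group mod messages are subtyped by the group mod command */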
+        if (length < OF_MESSAGE_MIN_GROUP_MOD_LENGTH) {
+            return OF_OBJECT_INVALID;
+        }
+        group_mod_cmd = of_message_group_mod_command_get(msg);
+        obj_id = of_group_mod_to_object_id(group_mod_cmd, ver);
+    }
+
     return obj_id;
 }
 """
 
+    oxm_template = """
+/**
+ * oxm wire type to object ID array.
+ * Treat as private; use function accessor below
+ */
+
+extern const of_object_id_t *const of_oxm_type_to_id[OF_VERSION_ARRAY_MAX];
+
+#define OF_OXM_ITEM_COUNT %(ar_len)d\n
+
+/**
+ * Map an oxm wire value to an OF object
+ * @param oxm The oxm type wire value
+ * @param version The version associated with the check
+ * @return The oxm OF object type
+ * @return OF_OBJECT_INVALID if type does not map to an object
+ *
+ */
+of_object_id_t
+of_oxm_to_object_id(uint32_t type_len, of_version_t version)
+{
+    if (!OF_VERSION_OKAY(version)) {
+        return OF_OBJECT_INVALID;
+    }
+
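+    /* OXM header layout: class in the upper 16 bits, field and has_mask in the next 8, length in the low 8 */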
+    uint16_t class = (type_len >> 16) & 0xffff;
+    uint8_t masked_type = (type_len >> 8) & 0xff;
+
+    if (class == 0x8000) {
+        if (masked_type < 0 || masked_type >= OF_OXM_ITEM_COUNT) {
+            return OF_OBJECT_INVALID;
+        }
+
+        return of_oxm_type_to_id[version][masked_type];
+    } else if (class == 0x0003) {
+        switch (masked_type) {
+        case 0x00: return OF_OXM_BSN_IN_PORTS_128;
+        case 0x01: return OF_OXM_BSN_IN_PORTS_128_MASKED;
+        case 0x02: return OF_OXM_BSN_LAG_ID;
+        case 0x03: return OF_OXM_BSN_LAG_ID_MASKED;
+        case 0x04: return OF_OXM_BSN_VRF;
+        case 0x05: return OF_OXM_BSN_VRF_MASKED;
+        case 0x06: return OF_OXM_BSN_GLOBAL_VRF_ALLOWED;
+        case 0x07: return OF_OXM_BSN_GLOBAL_VRF_ALLOWED_MASKED;
+        case 0x08: return OF_OXM_BSN_L3_INTERFACE_CLASS_ID;
+        case 0x09: return OF_OXM_BSN_L3_INTERFACE_CLASS_ID_MASKED;
+        case 0x0a: return OF_OXM_BSN_L3_SRC_CLASS_ID;
+        case 0x0b: return OF_OXM_BSN_L3_SRC_CLASS_ID_MASKED;
+        case 0x0c: return OF_OXM_BSN_L3_DST_CLASS_ID;
+        case 0x0d: return OF_OXM_BSN_L3_DST_CLASS_ID_MASKED;
+        default: return OF_OBJECT_INVALID;
+        }
+    } else {
+        return OF_OBJECT_INVALID;
+    }
+}
+"""
+
     # Action types array gen
     ar_len = type_maps.type_array_len(type_maps.action_types, max_type_value)
-    out.write(map_with_experimenter_template % 
+    out.write(map_with_experimenter_template %
               dict(name="action", u_name="ACTION", ar_len=ar_len))
 
     # Action ID types array gen
     ar_len = type_maps.type_array_len(type_maps.action_id_types, max_type_value)
-    out.write(map_with_experimenter_template % 
+    out.write(map_with_experimenter_template %
               dict(name="action_id", u_name="ACTION_ID", ar_len=ar_len))
 
     # Instruction types array gen
     ar_len = type_maps.type_array_len(type_maps.instruction_types,
                                       max_type_value)
-    out.write(map_with_experimenter_template % 
+    out.write(map_with_experimenter_template %
               dict(name="instruction", u_name="INSTRUCTION", ar_len=ar_len))
 
     # Queue prop types array gen
     ar_len = type_maps.type_array_len(type_maps.queue_prop_types,
                                       max_type_value)
-    out.write(map_with_experimenter_template % 
+    out.write(map_with_experimenter_template %
               dict(name="queue_prop", u_name="QUEUE_PROP", ar_len=ar_len))
 
     # Table feature prop types array gen
     ar_len = type_maps.type_array_len(type_maps.table_feature_prop_types,
                                       max_type_value)
-    out.write(map_with_experimenter_template % 
+    out.write(map_with_experimenter_template %
               dict(name="table_feature_prop", u_name="TABLE_FEATURE_PROP",
                    ar_len=ar_len))
 
     # Meter band types array gen
     ar_len = type_maps.type_array_len(type_maps.meter_band_types,
                                       max_type_value)
-    out.write(map_with_experimenter_template % 
+    out.write(map_with_experimenter_template %
               dict(name="meter_band", u_name="METER_BAND", ar_len=ar_len))
 
     # Hello elem types array gen
     ar_len = type_maps.type_array_len(type_maps.hello_elem_types,
                                       max_type_value)
-    out.write(map_template % 
+    out.write(map_template %
               dict(name="hello_elem", u_name="HELLO_ELEM", ar_len=ar_len))
 
     # Stats types array gen
     ar_len = type_maps.type_array_len(type_maps.stats_types,
                                       max_type_value)
-    out.write(stats_template % 
+    out.write(stats_template %
               dict(name="stats_reply", u_name="STATS_REPLY", ar_len=ar_len))
-    out.write(stats_template % 
-              dict(name="stats_request", u_name="STATS_REQUEST", 
+    out.write(stats_template %
+              dict(name="stats_request", u_name="STATS_REQUEST",
                    ar_len=ar_len))
 
+    ar_len = type_maps.type_array_len(type_maps.error_types,
+                                      max_type_value)
+    out.write(error_msg_template %
+              dict(name="error_msg", u_name="ERROR_MSG", ar_len=ar_len))
+#     out.write(error_msg_function)
+
     ar_len = type_maps.type_array_len(type_maps.flow_mod_types, max_type_value)
-    out.write(map_template % 
+    out.write(map_template %
               dict(name="flow_mod", u_name="FLOW_MOD", ar_len=ar_len))
 
+    ar_len = type_maps.type_array_len(type_maps.group_mod_types,
+                                      max_type_value)
+    out.write(map_template %
+              dict(name="group_mod", u_name="GROUP_MOD", ar_len=ar_len))
+
+    # OXM
     ar_len = type_maps.type_array_len(type_maps.oxm_types, max_type_value)
     out.write("""
 /* NOTE: We could optimize the OXM and only generate OF 1.2 versions. */
 """)
-    out.write(map_template % 
-              dict(name="oxm", u_name="OXM", ar_len=ar_len))
+    out.write(oxm_template % dict(ar_len=ar_len))
 
+    # Messages
     out.write(experimenter_function)
     # Must follow stats reply/request
     ar_len = type_maps.type_array_len(type_maps.message_types, max_type_value)
-    out.write(msg_template % 
+    out.write(msg_template %
               dict(name="message", u_name="MESSAGE", ar_len=ar_len))
 
-def gen_obj_to_type_map_functions(out):
-    """
-    Generate the static line maps from object IDs to types
-    @param out The file handle to write to
-    """
-
-    ################################################################
-    # Generate object ID to primary type map
-    ################################################################
-
-    out.write("""
-extern const int *const of_object_to_type_map[OF_VERSION_ARRAY_MAX];
-
-/**
- * Map an object ID to its primary wire type value
- * @param id An object ID
- * @return For message objects, the type value in the OpenFlow header
- * @return For non-message objects such as actions, instructions, OXMs
- * returns the type value that appears in the respective sub-header
- * @return -1 For improper version or out of bounds input
- *
- * NOTE that for stats request/reply, returns the header type, not the
- * sub-type
- *
- * Also, note that the value is returned as a signed integer.  So -1 is
- * an error code, while 0xffff is the usual "experimenter" code.
- */
-static inline int
-of_object_to_wire_type(of_object_id_t id, of_version_t version)
-{
-    if (!OF_VERSION_OKAY(version)) {
-        return -1;
-    }
-    if (id < 0 || id >= OF_OBJECT_COUNT) {
-        return -1;
-    }
-    return of_object_to_type_map[version][id];
-}
-
-""")
-
-    # Now for experimenter ids
-    out.write("""
-/**
- * Map from object ID to a triple, (is_extension, experimenter id, subtype)
- */
-""")
-    out.write("""
-typedef struct of_experimenter_data_s {
-    int is_extension;  /* Boolean indication that this is an extension */
-    uint32_t experimenter_id;
-    uint32_t subtype;
-} of_experimenter_data_t;
-
-""")
-
-    out.write("""
-extern const of_experimenter_data_t *const of_object_to_extension_data[OF_VERSION_ARRAY_MAX];
-
-/**
- * Map from the object ID of an extension to the experimenter ID
- */
-static inline uint32_t
-of_extension_to_experimenter_id(of_object_id_t obj_id, of_version_t ver)
-{
-    if (obj_id < 0 || obj_id > OF_OBJECT_COUNT) {
-        return (uint32_t) -1;
-    }
-    /* @fixme: Verify ver? */
-    return of_object_to_extension_data[ver][obj_id].experimenter_id;
-}
-
-/**
- * Map from the object ID of an extension to the experimenter subtype
- */
-static inline uint32_t
-of_extension_to_experimenter_subtype(of_object_id_t obj_id, of_version_t ver)
-{
-    if (obj_id < 0 || obj_id > OF_OBJECT_COUNT) {
-        return (uint32_t) -1;
-    }
-    /* @fixme: Verify ver? */
-    return of_object_to_extension_data[ver][obj_id].subtype;
-}
-
-/**
- * Boolean function indicating the the given object ID/version
- * is recognized as a supported (decode-able) extension.
- */
-static inline int
-of_object_id_is_extension(of_object_id_t obj_id, of_version_t ver)
-{
-    if (obj_id < 0 || obj_id > OF_OBJECT_COUNT) {
-        return (uint32_t) -1;
-    }
-    /* @fixme: Verify ver? */
-    return of_object_to_extension_data[ver][obj_id].is_extension;
-}
-""")
-
-    ################################################################
-    # Generate object ID to the stats sub-type map
-    ################################################################
-
-    out.write("""
-/**
- * Map an object ID to a stats type
- * @param id An object ID
- * @return The wire value for the stats type
- * @return -1 if not supported for this version
- * @return -1 if id is not a specific stats type ID
- *
- * Note that the value is returned as a signed integer.  So -1 is
- * an error code, while 0xffff is the usual "experimenter" code.
- */
-
-static inline int
-of_object_to_stats_type(of_object_id_t id, of_version_t version)
-{
-    if (!OF_VERSION_OKAY(version)) {
-        return -1;
-    }
-    switch (id) {
-""")
-    # Assumes 1.2 contains all stats types and type values are
-    # the same across all versions
-    stats_names = dict()
-    for ver in of_g.of_version_range:
-        for name, value in type_maps.stats_types[ver].items():
-            if name in stats_names and (not value == stats_names[name]):
-                print "ERROR stats type differ violating assumption"
-                sys.exit(1)
-            stats_names[name] = value
-
-    for name, value in stats_names.items():
-        out.write("    case OF_%s_STATS_REPLY:\n" % name.upper())
-        out.write("    case OF_%s_STATS_REQUEST:\n" % name.upper())
-        for version in of_g.of_version_range:
-            if not name in type_maps.stats_types[version]:
-                out.write("        if (version == %s) break;\n" %
-                          of_g.of_version_wire2name[version])
-        out.write("        return %d;\n" % value)
-    out.write("""
-    default:
-        break;
-    }
-    return -1; /* Not recognized as stats type object for this version */
-}
-""")
-
-    ################################################################
-    # Generate object ID to the flow mod sub-type map
-    ################################################################
-
-    out.write("""
-/**
- * Map an object ID to a flow-mod command value
- * @param id An object ID
- * @return The wire value for the flow-mod command
- * @return -1 if not supported for this version
- * @return -1 if id is not a specific stats type ID
- *
- * Note that the value is returned as a signed integer.  So -1 is
- * an error code, while 0xffff is the usual "experimenter" code.
- */
-
-static inline int
-of_object_to_flow_mod_command(of_object_id_t id, of_version_t version)
-{
-    if (!OF_VERSION_OKAY(version)) {
-        return -1;
-    }
-    switch (id) {
-""")
-    # Assumes 1.2 contains all stats types and type values are
-    # the same across all versions
-    flow_mod_names = dict()
-    for ver in of_g.of_version_range:
-        for name, value in type_maps.flow_mod_types[ver].items():
-            if name in flow_mod_names and \
-                    (not value == flow_mod_names[name]):
-                print "ERROR flow mod command differ violating assumption"
-                sys.exit(1)
-            flow_mod_names[name] = value
-
-    for name, value in flow_mod_names.items():
-        out.write("    case OF_FLOW_%s:\n" % name.upper())
-        for version in of_g.of_version_range:
-            if not name in type_maps.flow_mod_types[version]:
-                out.write("        if (version == %s) break;\n" %
-                          of_g.of_version_wire2name[version])
-        out.write("        return %d;\n" % value)
-    out.write("""
-    default:
-        break;
-    }
-    return -1; /* Not recognized as flow mod type object for this version */
-}
-
-""")
-
-def gen_type_maps_header(out):
-    """
-    Generate various header file declarations for type maps
-    @param out The file handle to write to
-    """
-
-    out.write("""
-/**
- * Generic experimenter type value.  Applies to all except 
- * top level message: Action, instruction, error, stats, queue_props, oxm
- */
-#define OF_EXPERIMENTER_TYPE 0xffff
-""")
-    gen_type_to_obj_map_functions(out)
-    gen_obj_to_type_map_functions(out)
-
-    out.write("extern const int *const of_object_fixed_len[OF_VERSION_ARRAY_MAX];\n")
-
-    out.write("""
-/**
- * Map a message in a wire buffer object to its OF object id.
- * @param wbuf Pointer to a wire buffer object, populated with an OF message
- * @returns The object ID of the message
- * @returns OF_OBJECT_INVALID if unable to parse the message type
- */
-
-static inline of_object_id_t
-of_wire_object_id_get(of_wire_buffer_t *wbuf)
-{
-    of_message_t msg;
-
-    msg = (of_message_t)WBUF_BUF(wbuf);
-    return of_message_to_object_id(msg, WBUF_CURRENT_BYTES(wbuf));
-}
-
-/**
- * Use the type/length from the wire buffer and init the object
- * @param obj The object being initialized
- * @param base_object_id If > 0, this indicates the base object
- * @param max_len If > 0, the max length to expect for the obj
- * type for inheritance checking
- * @return OF_ERROR_
- *
- * Used for inheritance type objects such as actions and OXMs
- * The type is checked and if valid, the object is initialized.
- * Then the length is taken from the buffer.
- *
- * Note that the object version must already be properly set.
- */
-static inline int
-of_object_wire_init(of_object_t *obj, of_object_id_t base_object_id,
-                    int max_len)
-{
-    if (obj->wire_type_get != NULL) {
-        of_object_id_t id;
-        obj->wire_type_get(obj, &id);
-        if (!of_wire_id_valid(id, base_object_id)) {
-            return OF_ERROR_PARSE;
-        }
-        obj->object_id = id;
-        /* Call the init function for this object type; do not push to wire */
-        of_object_init_map[id]((of_object_t *)(obj), obj->version, -1, 0);
-    }
-    if (obj->wire_length_get != NULL) {
-        int length;
-        obj->wire_length_get(obj, &length);
-        if (length < 0 || (max_len > 0 && length > max_len)) {
-            return OF_ERROR_PARSE;
-        }
-        obj->length = length;
-    } else {
-        /* @fixme Does this cover everything else? */
-        obj->length = of_object_fixed_len[obj->version][base_object_id];
-    }
-
-    return OF_ERROR_NONE;
-}
-
-""")
-
-    # Generate the function that sets the object type fields
-    out.write("""
-
-/**
- * Map a message in a wire buffer object to its OF object id.
- * @param wbuf Pointer to a wire buffer object, populated with an OF message
- * @returns The object ID of the message
- * @returns OF_OBJECT_INVALID if unable to parse the message type
- *
- * Version must be set in the buffer prior to calling this routine
- */
-
-static inline int
-of_wire_message_object_id_set(of_wire_buffer_t *wbuf, of_object_id_t id)
-{
-    int type;
-    of_version_t ver;
-    of_message_t msg;
-
-    msg = (of_message_t)WBUF_BUF(wbuf);
-
-    ver = of_message_version_get(msg);
-
-    /* ASSERT(id is a message object) */
-
-    if ((type = of_object_to_wire_type(id, ver)) < 0) {
-        return OF_ERROR_PARAM;
-    }
-    of_message_type_set(msg, type);
-
-    if ((type = of_object_to_stats_type(id, ver)) >= 0) {
-        /* It's a stats obj */
-        of_message_stats_type_set(msg, type);
-    }
-    if ((type = of_object_to_flow_mod_command(id, ver)) >= 0) {
-        /* It's a flow mod obj */
-        of_message_flow_mod_command_set(msg, ver, type);
-    }
-    if (of_object_id_is_extension(id, ver)) {
-        uint32_t val32;
-
-        /* Set the experimenter and subtype codes */
-        val32 = of_extension_to_experimenter_id(id, ver);
-        of_message_experimenter_id_set(msg, val32);
-        val32 = of_extension_to_experimenter_subtype(id, ver);
-        of_message_experimenter_subtype_set(msg, val32);
-    }
-
-    return OF_ERROR_NONE;
-}
-""")
+    # BSN TLV elem types array gen
+    ar_len = type_maps.type_array_len(type_maps.bsn_tlv_types,
+                                      max_type_value)
+    out.write(map_template %
+              dict(name="bsn_tlv", u_name="BSN_TLV", ar_len=ar_len))
 
 def gen_type_data_header(out):
 
@@ -926,7 +616,7 @@
         out.write("""
 /**
  * Special length calculation for %(cls)s->%(name)s.
- * @param obj An object of type %(cls)s to check for 
+ * @param obj An object of type %(cls)s to check for
  * length of %(name)s
  * @param bytes[out] Where to store the calculated length
  *
@@ -937,7 +627,7 @@
 
 /**
  * Special offset calculation for %(cls)s->%(name)s.
- * @param obj An object of type %(cls)s to check for 
+ * @param obj An object of type %(cls)s to check for
  * length of %(name)s
  * @param offset[out] Where to store the calculated length
  *
@@ -966,7 +656,7 @@
 #  */
 # extern int of_length_%(s_cls)s_get(
 #     %(cls)s_t *obj, int *bytes);
-# """ % dict(cls=cls, s_cls=s_cls))        
+# """ % dict(cls=cls, s_cls=s_cls))
 
     out.write("""
 /****************************************************************
@@ -977,40 +667,33 @@
 extern void of_object_message_wire_length_set(of_object_t *obj, int bytes);
 
 extern void of_oxm_wire_length_get(of_object_t *obj, int *bytes);
-extern void of_oxm_wire_length_set(of_object_t *obj, int bytes);
 extern void of_oxm_wire_object_id_get(of_object_t *obj, of_object_id_t *id);
-extern void of_oxm_wire_object_id_set(of_object_t *obj, of_object_id_t id);
 
 extern void of_tlv16_wire_length_get(of_object_t *obj, int *bytes);
 extern void of_tlv16_wire_length_set(of_object_t *obj, int bytes);
 
-extern void of_tlv16_wire_object_id_set(of_object_t *obj, of_object_id_t id);
-
 /* Wire length is uint16 at front of structure */
 extern void of_u16_len_wire_length_get(of_object_t *obj, int *bytes);
 extern void of_u16_len_wire_length_set(of_object_t *obj, int bytes);
 
 extern void of_action_wire_object_id_get(of_object_t *obj, of_object_id_t *id);
 extern void of_action_id_wire_object_id_get(of_object_t *obj, of_object_id_t *id);
-extern void of_instruction_wire_object_id_get(of_object_t *obj, 
+extern void of_instruction_wire_object_id_get(of_object_t *obj,
     of_object_id_t *id);
-extern void of_queue_prop_wire_object_id_get(of_object_t *obj, 
+extern void of_queue_prop_wire_object_id_get(of_object_t *obj,
     of_object_id_t *id);
-extern void of_table_feature_prop_wire_object_id_get(of_object_t *obj, 
+extern void of_table_feature_prop_wire_object_id_get(of_object_t *obj,
     of_object_id_t *id);
-extern void of_meter_band_wire_object_id_get(of_object_t *obj, 
+extern void of_meter_band_wire_object_id_get(of_object_t *obj,
     of_object_id_t *id);
-extern void of_hello_elem_wire_object_id_get(of_object_t *obj, 
+extern void of_hello_elem_wire_object_id_get(of_object_t *obj,
+    of_object_id_t *id);
+extern void of_bsn_tlv_wire_object_id_get(of_object_t *obj,
     of_object_id_t *id);
 
-/** @fixme VERIFY LENGTH IS NUMBER OF BYTES OF ENTRY INCLUDING HDR */
-#define OF_OXM_MASKED_TYPE_GET(hdr) (((hdr) >> 8) & 0xff)
-#define OF_OXM_MASKED_TYPE_SET(hdr, val)                    \\
-    (hdr) = ((hdr) & 0xffff00ff) + (((val) & 0xff) << 8)
-
-#define OF_OXM_LENGTH_GET(hdr) ((hdr) & 0xff)
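+/* The OXM wire length field excludes the 4-byte OXM header; these macros work with the full entry length */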
+#define OF_OXM_LENGTH_GET(hdr) (((hdr) & 0xff) + 4)
 #define OF_OXM_LENGTH_SET(hdr, val)                         \\
-    (hdr) = ((hdr) & 0xffffff00) + ((val) & 0xff)
+    (hdr) = ((hdr) & 0xffffff00) + (((val) - 4) & 0xff)
 
 extern void of_packet_queue_wire_length_get(of_object_t *obj, int *bytes);
 extern void of_packet_queue_wire_length_set(of_object_t *obj, int bytes);
@@ -1064,44 +747,43 @@
 };
 """)
 
-    
-################################################################
-################################################################
 
-# THIS IS PROBABLY NOT NEEDED AND MAY NOT BE CALLED CURRENTLY
-def gen_object_id_to_stats_type(out):
+def gen_extra_length_array(out):
+    """
+    Generate an array giving the extra lengths of all objects/versions
+    @param out The file handle to which to write
+    """
     out.write("""
 /**
- * Map from message object ID to stats type
- *
- * All message object IDs are mapped for simplicity
+ * An array with the number of bytes in the extra length part
+ * of each OF object
  */
 """)
+
     for version in of_g.of_version_range:
-        out.write("const int *of_object_to_stats_type_map_v%d = {\n" % (i+1))
-        out.write("    -1, /* of_object (invalid) */\n");
-        for cls in of_g.ordered_messages:
-            name = cls[3:]
-            name = name[:name.find("_stats")]
-            if (((cls in type_maps.stats_reply_list) or
-                 (cls in type_maps.stats_request_list)) and
-                name in type_maps.stats_types[i]):
-                out.write("    %d, /* %s */\n" %
-                          (type_maps.stats_types[i][name], cls))
-            else:
-                out.write("    -1, /* %s (invalid) */\n" % cls)
-        out.write("};\n\n")
+        out.write("""
+static const int\nof_object_extra_len_v%d[OF_OBJECT_COUNT] = {
+    -1,   /* of_object is not instantiable */
+""" % version)
+        for i, cls in enumerate(of_g.all_class_order):
+            comma = ","
+            if i == len(of_g.all_class_order) - 1:
+                comma = ""
+            val = "-1" + comma
+            if (cls, version) in of_g.base_length:
+                val = str(of_g.extra_length.get((cls, version), 0)) + comma
+            out.write("    %-5s /* %d: %s */\n" % (val, i + 1, cls))
+        out.write("};\n")
 
     out.write("""
 /**
- * Unified map, indexed by wire version which is 1-based.
+ * Unified map of the extra length part of each object, indexed by wire version
  */
-const int *of_object_to_stats_type_map[OF_VERSION_ARRAY_MAX] = {
+const int *const of_object_extra_len[OF_VERSION_ARRAY_MAX] = {
     NULL,
 """)
     for version in of_g.of_version_range:
-        out.write("    of_object_to_stats_type_map_v%d,\n" % version)
+        out.write("    of_object_extra_len_v%d,\n" % version)
     out.write("""
 };
 """)
-
diff --git a/c_gen/c_validator_gen.py b/c_gen/c_validator_gen.py
index 3ab6acf..9bb0407 100644
--- a/c_gen/c_validator_gen.py
+++ b/c_gen/c_validator_gen.py
@@ -33,14 +33,16 @@
 """
 
 import sys
-import of_g
-import loxi_front_end.match as match
-import loxi_front_end.flags as flags
+import c_gen.of_g_legacy as of_g
+import c_gen.match as match
+import c_gen.flags as flags
 from generic_utils import *
-import loxi_front_end.type_maps as type_maps
+import c_gen.type_maps as type_maps
 import loxi_utils.loxi_utils as loxi_utils
-import loxi_front_end.identifiers as identifiers
+import c_gen.loxi_utils_legacy as loxi_utils
+import c_gen.identifiers as identifiers
 from c_test_gen import var_name_map
+from c_code_gen import v3_match_offset_get
 
 def gen_h(out, name):
     loxi_utils.gen_c_copy_license(out)
@@ -208,11 +210,29 @@
             return -1;
         }
 """ % dict(m_name=m_name, m_offset=m_offset, cls=cls))
+        elif version >= of_g.VERSION_1_2 and loxi_utils.cls_is_flow_mod(cls) and m_name == "instructions":
+            # See _FLOW_MOD_INSTRUCTIONS_OFFSET
+            match_offset = v3_match_offset_get(cls)
+            m_offset = '%s_offset' % m_name
+            out.write("""
+    {
+        uint16_t %(m_name)s_len, %(m_name)s_offset;
+        uint16_t match_len;
+        buf_u16_get(buf + %(match_offset)s + 2, &match_len);
+        %(m_name)s_offset = %(match_offset)s + OF_MATCH_BYTES(match_len);
+        %(m_name)s_len = len - %(m_name)s_offset;
+""" % dict(m_name=m_name, cls=cls, match_offset=match_offset))
+        elif cls == "of_bsn_gentable_entry_add" and m_name == "value":
+            continue
+        elif cls == "of_bsn_gentable_entry_desc_stats_entry" and m_name == "value":
+            continue
+        elif cls == "of_bsn_gentable_entry_stats_entry" and m_name == "stats":
+            continue
         else:
             out.write("""
-    
+
     {    int %(m_name)s_len = len - %(m_offset)s;
-   
+
 """  % dict(m_name=m_name, m_offset=m_offset))
         out.write("""
         if (%(m_cls)s_%(ver_name)s_validate(buf + %(m_offset)s, %(m_name)s_len) < 0) {
@@ -240,7 +260,7 @@
         subclasses = type_maps.inheritance_map[e_cls]
         out.write("""\
     while (len >= %(fixed_len)s) {
-        of_object_id_t e_id; 
+        of_object_id_t e_id;
         uint16_t e_type, e_len;
         buf_u16_get(buf, &e_type);
         buf_u16_get(buf+2, &e_len);
diff --git a/c_gen/codegen.py b/c_gen/codegen.py
new file mode 100644
index 0000000..3249747
--- /dev/null
+++ b/c_gen/codegen.py
@@ -0,0 +1,145 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+"""
+Code generation
+
+These functions extract data from the IR and render templates with it.
+"""
+
+from collections import namedtuple
+from itertools import groupby
+from StringIO import StringIO
+import template_utils
+import loxi_globals
+import loxi_ir.ir as ir
+import util
+import c_code_gen
+import c_gen.of_g_legacy as of_g
+import c_gen.type_maps as type_maps
+import c_gen.c_type_maps as c_type_maps
+
+PushWireTypesData = namedtuple('PushWireTypesData',
+    ['class_name', 'versioned_type_members'])
+PushWireTypesMember = namedtuple('PushWireTypesMember',
+    ['name', 'offset', 'length', 'value'])
+
+def push_wire_types_data(uclass):
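+    """
+    Collect the fixed wire-type members of uclass, grouped by the versions that
+    share identical values; returns None for virtual classes or classes without
+    type members
+    """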
+    if uclass.virtual or not uclass.has_type_members:
+        return None
+
+    # Generate a dict of version -> list of PushWireTypesMember
+    type_members_by_version = {}
+    for version, ofclass in sorted(uclass.version_classes.items()):
+        pwtms = []
+        for m in ofclass.members:
+            if isinstance(m, ir.OFTypeMember):
+                if m.name == "version" and m.value == version.wire_version:
+                    # Special case for version
+                    pwtms.append(PushWireTypesMember(m.name, m.offset, m.length, "obj->version"))
+                else:
+                    pwtms.append(PushWireTypesMember(m.name, m.offset, m.length, hex(m.value)))
+        type_members_by_version[version] = pwtms
+
+    # Merge versions with identical type members
+    all_versions = sorted(type_members_by_version.keys())
+    versioned_type_members = []
+    for pwtms, versions in groupby(all_versions, type_members_by_version.get):
+        versioned_type_members.append((pwtms, list(versions)))
+
+    return PushWireTypesData(
+        class_name=uclass.name,
+        versioned_type_members=versioned_type_members)
+
+def generate_classes(install_dir):
+    for uclass in loxi_globals.unified.classes:
+        with template_utils.open_output(install_dir, "loci/src/%s.c" % uclass.name) as out:
+            util.render_template(out, "class.c",
+                push_wire_types_data=push_wire_types_data(uclass))
+            # Append legacy generated code
+            c_code_gen.gen_new_function_definitions(out, uclass.name)
+            c_code_gen.gen_accessor_definitions(out, uclass.name)
+
+# TODO remove header classes and use the corresponding class instead
+def generate_header_classes(install_dir):
+    for cls in of_g.standard_class_order:
+        if cls.find("_header") < 0:
+            continue
+        with template_utils.open_output(install_dir, "loci/src/%s.c" % cls) as out:
+            util.render_template(out, "class.c",
+                push_wire_types_data=None)
+            # Append legacy generated code
+            c_code_gen.gen_new_function_definitions(out, cls)
+            c_code_gen.gen_accessor_definitions(out, cls)
+
+def generate_classes_header(install_dir):
+    # Collect legacy code
+    tmp = StringIO()
+    c_code_gen.gen_struct_typedefs(tmp)
+    c_code_gen.gen_new_function_declarations(tmp)
+    c_code_gen.gen_accessor_declarations(tmp)
+    c_code_gen.gen_generics(tmp)
+
+    with template_utils.open_output(install_dir, "loci/inc/loci/loci_classes.h") as out:
+        util.render_template(out, "loci_classes.h",
+            legacy_code=tmp.getvalue())
+
+def generate_lists(install_dir):
+    for cls in of_g.ordered_list_objects:
+        with template_utils.open_output(install_dir, "loci/src/%s.c" % cls) as out:
+            util.render_template(out, "class.c",
+                push_wire_types_data=None)
+            # Append legacy generated code
+            c_code_gen.gen_new_function_definitions(out, cls)
+            c_code_gen.gen_list_accessors(out, cls)
+
+def generate_strings(install_dir):
+    object_id_strs = []
+    object_id_strs.append("of_object")
+    object_id_strs.extend(of_g.ordered_messages)
+    object_id_strs.extend(of_g.ordered_non_messages)
+    object_id_strs.extend(of_g.ordered_list_objects)
+    object_id_strs.extend(of_g.ordered_pseudo_objects)
+    object_id_strs.append("of_unknown_object")
+
+    with template_utils.open_output(install_dir, "loci/src/loci_strings.c") as out:
+        util.render_template(out, "loci_strings.c", object_id_strs=object_id_strs)
+
+def generate_init_map(install_dir):
+    with template_utils.open_output(install_dir, "loci/src/loci_init_map.c") as out:
+        util.render_template(out, "loci_init_map.c", classes=of_g.standard_class_order)
+
+def generate_type_maps(install_dir):
+    # Collect legacy code
+    tmp = StringIO()
+    c_type_maps.gen_type_to_obj_map_functions(tmp)
+    c_type_maps.gen_type_maps(tmp)
+    c_type_maps.gen_length_array(tmp)
+    c_type_maps.gen_extra_length_array(tmp)
+
+    with template_utils.open_output(install_dir, "loci/src/of_type_maps.c") as out:
+        util.render_template(out, "of_type_maps.c", legacy_code=tmp.getvalue())
diff --git a/loxi_front_end/flags.py b/c_gen/flags.py
similarity index 98%
rename from loxi_front_end/flags.py
rename to c_gen/flags.py
index 3c401f9..1fa4ae5 100644
--- a/loxi_front_end/flags.py
+++ b/c_gen/flags.py
@@ -36,7 +36,7 @@
 import sys
 import copy
 import type_maps
-import of_g
+import c_gen.of_g_legacy as of_g
 import re
 
 # These mark idents as _not_ flags and have precedence
@@ -73,4 +73,3 @@
             return True
 
     return False
-    
diff --git a/loxi_front_end/identifiers.py b/c_gen/identifiers.py
similarity index 98%
rename from loxi_front_end/identifiers.py
rename to c_gen/identifiers.py
index 91c0e57..5862967 100644
--- a/loxi_front_end/identifiers.py
+++ b/c_gen/identifiers.py
@@ -31,14 +31,13 @@
 
 import sys
 from generic_utils import *
-import of_g
 
 ##
 # The value to use when an identifier is not defined for a version
 UNDEFINED_IDENT_VALUE = 0
 
 def add_identifier(name, ofp_name, ofp_group, value, version, all_idents, idents_by_group):
-    assert(isinstance(value, int))
+    assert(isinstance(value, (int,long)))
     if name in all_idents:
         all_idents[name]["values_by_version"][version] = value
         if ((all_idents[name]["ofp_name"] != ofp_name or
diff --git a/c_gen/loci_utils.py b/c_gen/loci_utils.py
new file mode 100644
index 0000000..686fb0f
--- /dev/null
+++ b/c_gen/loci_utils.py
@@ -0,0 +1,243 @@
+import sys
+from generic_utils import *
+import c_gen.of_g_legacy as of_g
+
+def class_signature(members):
+    """
+    Generate a signature string for a class in canonical form
+
+    @param members The list of member dicts for the class
+    """
+    return ";".join([",".join([x["m_type"], x["name"], str(x["offset"])])
+                     for x in members])
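
class_signature flattens each member's type, name, and wire offset into one canonical string, so two classes with identical layouts produce identical signatures. A small hypothetical example of the members list it expects:

    # Hypothetical members list; real entries come from of_g.unified.
    members = [
        {"m_type": "uint8_t",  "name": "version", "offset": 0},
        {"m_type": "uint16_t", "name": "length",  "offset": 2},
    ]
    class_signature(members)   # -> "uint8_t,version,0;uint16_t,length,2"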
+
+def type_dec_to_count_base(m_type):
+    """
+    Resolve a type declaration like uint8_t[4] to a count (4) and base_type
+    (uint8_t)
+
+    @param m_type The string type declaration to process
+    """
+    count = 1
+    chk_ar = m_type.split('[')
+    if len(chk_ar) > 1:
+        count_str = chk_ar[1].split(']')[0]
+        if count_str in of_g.ofp_constants:
+            count = of_g.ofp_constants[count_str]
+        else:
+            count = int(count_str)
+        base_type = chk_ar[0]
+    else:
+        base_type = m_type
+    return count, base_type
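
A couple of worked examples of the declaration parsing above; the named-constant case assumes the constant is present in of_g.ofp_constants:

    type_dec_to_count_base("uint8_t[4]")   # -> (4, "uint8_t")
    type_dec_to_count_base("uint32_t")     # -> (1, "uint32_t")
    # Named sizes resolve through the constants table, e.g.
    # type_dec_to_count_base("char[OF_MAX_TABLE_NAME_LEN]")
    #   -> (of_g.ofp_constants["OF_MAX_TABLE_NAME_LEN"], "char")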
+
+def list_to_entry_type(cls):
+    """
+    Return the entry type for a list
+    """
+    slen = len("of_list_")
+    return "of_" + cls[slen:]
+
+def type_to_short_name(m_type):
+    if m_type in of_g.of_base_types:
+        tname = of_g.of_base_types[m_type]["short_name"]
+    elif m_type in of_g.of_mixed_types:
+        tname = of_g.of_mixed_types[m_type]["short_name"]
+    else:
+        tname = "unknown"
+    return tname
+
+def type_to_name_type(cls, member_name):
+    """
+    Generate the root name of a member for accessor functions, etc
+    @param cls The class name
+    @param member_name The member name
+    """
+    members = of_g.unified[cls]["union"]
+    if not member_name in members:
+        debug("Error:  %s is not in class %s for acc_name defn" %
+              (member_name, cls))
+        sys.exit(1)
+
+    mem = members[member_name]
+    m_type = mem["m_type"]
+    id = mem["memid"]
+    tname = type_to_short_name(m_type)
+
+    return "o%d_m%d_%s" % (of_g.unified[cls]["object_id"], id, tname)
+
+
+def member_to_index(m_name, members):
+    """
+    Given a member name, return the index in the members dict
+    @param m_name The name of the data member to search for
+    @param members The dict of members
+    @return Index if found, -1 not found
+
+    Note we could generate an index when processing the original input
+    """
+    count = 0
+    for d in members:
+        if d["name"] == m_name:
+            return count
+        count += 1
+    return -1
+
+def member_base_type(cls, m_name):
+    """
+    Map a member to its of_ type
+    @param cls The class name
+    @param m_name The name of the member being gotten
+    @return The of_ type of the member
+    """
+    rv = of_g.unified[cls]["union"][m_name]["m_type"]
+    if rv[-2:] == "_t":
+        return rv
+    return rv + "_t"
+
+def member_type_is_octets(cls, m_name):
+    return member_base_type(cls, m_name) == "of_octets_t"
+
+def h_file_to_define(name):
+    """
+    Convert a .h file name to the define used for the header
+    """
+    h_name = name[:-2].upper()
+    h_name = "_" + h_name + "_H_"
+    return h_name
+
+def type_to_cof_type(m_type):
+    if m_type in of_g.of_base_types:
+        if "cof_type" in of_g.of_base_types[m_type]:
+            return of_g.of_base_types[m_type]["cof_type"]
+    return m_type
+
+
+def member_is_scalar(cls, m_name):
+    return of_g.unified[cls]["union"][m_name]["m_type"] in of_g.of_scalar_types
+
+def type_is_scalar(m_type):
+    return m_type in of_g.of_scalar_types
+
+def skip_member_name(name):
+    return name.find("pad") == 0 or name in of_g.skip_members
+
+def enum_name(cls):
+    """
+    Return the name used for an enum identifier for the given class
+    @param cls The class name
+    """
+    return cls.upper()
+
+def class_in_version(cls, ver):
+    """
+    Return boolean indicating if cls is defined for wire version ver
+    """
+
+    return (cls, ver) in of_g.base_length
+
+def instance_to_class(instance, parent):
+    """
+    Return the name of the class for an instance of inheritance type parent
+    """
+    return parent + "_" + instance
+
+def sub_class_to_var_name(cls):
+    """
+    Given a subclass name like of_action_output, generate the
+    name of a variable like 'output'
+    @param cls The class name
+    """
+    pass
+
+def class_is_var_len(cls, version):
+    # Match is special case.  Only version 1.2 (wire version 3) is var
+    if cls == "of_match":
+        return version == 3
+
+    return not (cls, version) in of_g.is_fixed_length
+
+def base_type_to_length(base_type, version):
+    if base_type + "_t" in of_g.of_base_types:
+        inst_len = of_g.of_base_types[base_type + "_t"]["bytes"]
+    else:
+        inst_len = of_g.base_length[(base_type, version)]
+    return inst_len
+
+def version_to_name(version):
+    """
+    Convert an integer version to the C macro name
+    """
+    return "OF_" + of_g.version_names[version]
+
+##
+# Is class a flow modify of some sort?
+
+def cls_is_flow_mod(cls):
+    return cls in ["of_flow_mod", "of_flow_modify", "of_flow_add", "of_flow_delete",
+                   "of_flow_modify_strict", "of_flow_delete_strict"]
+
+def all_member_types_get(cls, version):
+    """
+    Get the members and list of types for members of a given class
+    @param cls The class name to process
+    @param version The version for the class
+    """
+    member_types = []
+
+    if not version in of_g.unified[cls]:
+        return ([], [])
+
+    if "use_version" in of_g.unified[cls][version]:
+        v = of_g.unified[cls][version]["use_version"]
+        members = of_g.unified[cls][v]["members"]
+    else:
+        members = of_g.unified[cls][version]["members"]
+    # Accumulate variables that are supported
+    for member in members:
+        m_type = member["m_type"]
+        m_name = member["name"]
+        if skip_member_name(m_name):
+            continue
+        if not m_type in member_types:
+            member_types.append(m_type)
+
+    return (members, member_types)
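
A hedged illustration of what all_member_types_get filters out, using a hypothetical member list in the shape the unified structure provides:

    # Hypothetical members for some class/version; names and types are
    # illustrative only.
    members = [
        {"name": "version", "m_type": "uint8_t"},   # skipped: in of_g.skip_members
        {"name": "pad",     "m_type": "uint8_t"},   # skipped: padding
        {"name": "xid",     "m_type": "uint32_t"},
        {"name": "flags",   "m_type": "uint32_t"},
    ]
    # After filtering, member_types collapses to ["uint32_t"]; duplicate types
    # are recorded once and skipped members contribute nothing.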
+
+def list_name_extract(list_type):
+    """
+    Return the base name for a list object of the given type
+    @param list_type The type of the list as appears in the input,
+    for example list(of_port_desc_t).
+    @return A pair, (list-name, base-type) where list-name is the
+    base name for the list, for example of_list_port_desc, and base-type
+    is the type of list elements like of_port_desc_t
+    """
+    base_type = list_type[5:-1]
+    list_name = base_type
+    if list_name.find("of_") == 0:
+        list_name = list_name[3:]
+    if list_name[-2:] == "_t":
+        list_name = list_name[:-2]
+    list_name = "of_list_" + list_name
+    return (list_name, base_type)
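
Two worked examples of the name extraction described in the docstring above:

    list_name_extract("list(of_port_desc_t)")  # -> ("of_list_port_desc", "of_port_desc_t")
    list_name_extract("list(of_action_t)")     # -> ("of_list_action", "of_action_t")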
+
+def version_to_name(version):
+    """
+    Convert an integer version to the C macro name
+    """
+    return "OF_" + of_g.version_names[version]
+
+def gen_c_copy_license(out):
+    """
+    Generate the top comments for copyright and license
+    """
+    import c_gen.util
+    c_gen.util.render_template(out, '_copyright.c')
+
+def accessor_returns_error(a_type, m_type):
+    is_var_len = (not type_is_scalar(m_type)) and \
+        [x for x in of_g.of_version_range if class_is_var_len(m_type[:-2], x)] != []
+    if a_type == "set" and is_var_len:
+        return True
+    elif m_type == "of_match_t":
+        return True
+    else:
+        return False
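
A short sketch of the decision accessor_returns_error implements, with hypothetical inputs (it assumes of_g has been populated so the scalar and fixed-length tables are available):

    accessor_returns_error("set", "of_octets_t")  # True: 'set' of a variable-length type
    accessor_returns_error("get", "of_octets_t")  # False: only 'set' accessors are affected
    accessor_returns_error("get", "of_match_t")   # True: of_match_t always reports errors
    accessor_returns_error("set", "uint32_t")     # False: scalars never do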
diff --git a/c_gen/loxi_utils_legacy.py b/c_gen/loxi_utils_legacy.py
new file mode 100644
index 0000000..4092d4f
--- /dev/null
+++ b/c_gen/loxi_utils_legacy.py
@@ -0,0 +1,510 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+"""
+@brief Utilities involving LOXI naming conventions
+
+Utility functions for OpenFlow class generation
+
+These may need to be sorted out into language specific functions
+"""
+
+import sys
+import c_gen.of_g_legacy as of_g
+import tenjin
+from generic_utils import find, memoize
+
+def class_signature(members):
+    """
+    Generate a signature string for a class in canonical form
+
+    @param members The list of member dicts for the class
+    """
+    return ";".join([",".join([x["m_type"], x["name"], str(x["offset"])])
+                     for x in members])
+
+def type_dec_to_count_base(m_type):
+    """
+    Resolve a type declaration like uint8_t[4] to a count (4) and base_type
+    (uint8_t)
+
+    @param m_type The string type declaration to process
+    """
+    count = 1
+    chk_ar = m_type.split('[')
+    if len(chk_ar) > 1:
+        count_str = chk_ar[1].split(']')[0]
+        if count_str in of_g.ofp_constants:
+            count = of_g.ofp_constants[count_str]
+        else:
+            count = int(count_str)
+        base_type = chk_ar[0]
+    else:
+        base_type = m_type
+    return count, base_type
+
+##
+# Class types:
+#
+# Virtual
+#    A virtual class is one which does not have an explicit wire
+#    representation.  For example, an inheritance super class
+#    or a list type.
+#
+# List
+#    A list of objects of some other type
+#
+# TLV16
+#    The wire representation starts with 16-bit type and length fields
+#
+# OXM
+#    An extensible match object
+#
+# Message
+#    A top level OpenFlow message
+#
+#
+
+def class_is_message(cls):
+    """
+    Return True if cls is a message object based on info in unified
+    """
+    return "xid" in of_g.unified[cls]["union"] and cls != "of_header"
+
+def class_is_tlv16(cls):
+    """
+    Return True if cls_name is an object which uses uint16 for type and length
+    """
+    if cls.find("of_action") == 0: # Includes of_action_id classes
+        return True
+    if cls.find("of_instruction") == 0:
+        return True
+    if cls.find("of_queue_prop") == 0:
+        return True
+    if cls.find("of_table_feature_prop") == 0:
+        return True
+    # *sigh*
+    if cls.find("of_meter_band_stats") == 0:  # NOT A TLV
+        return False
+    if cls.find("of_meter_band") == 0:
+        return True
+    if cls.find("of_hello_elem") == 0:
+        return True
+    if cls == "of_match_v3":
+        return True
+    if cls == "of_match_v4":
+        return True
+    if cls.find("of_bsn_tlv") == 0:
+        return True
+    return False
+
+def class_is_u16_len(cls):
+    """
+    Return True if cls_name is an object which uses initial uint16 length
+    """
+    return cls in ["of_group_desc_stats_entry", "of_group_stats_entry",
+                   "of_flow_stats_entry", "of_bucket", "of_table_features",
+                   "of_bsn_port_counter_stats_entry", "of_bsn_vlan_counter_stats_entry",
+                   "of_bsn_gentable_entry_desc_stats_entry", "of_bsn_gentable_entry_stats_entry",
+                   "of_bsn_gentable_desc_stats_entry"]
+
+def class_is_oxm(cls):
+    """
+    Return True if cls_name is an OXM object
+    """
+    if cls.find("of_oxm") == 0:
+        return True
+    return False
+
+def class_is_action(cls):
+    """
+    Return True if cls_name is an action object
+
+    Note that action_id is not an action object, though it has
+    the same header.  It looks like an action header, but the type
+    is used to identify a kind of action, it does not indicate the
+    type of the object following.
+    """
+    if cls.find("of_action_id") == 0:
+        return False
+    if cls.find("of_action") == 0:
+        return True
+
+    # For each vendor, check for vendor specific action
+    for exp in of_g.experimenter_name_to_id:
+        if cls.find("of_action" + exp) == 0:
+            return True
+
+    return False
+
+def class_is_action_id(cls):
+    """
+    Return True if cls_name is an action_id object
+
+    Note that action_id is not an action object, though it has
+    the same header.  It looks like an action header, but the type
+    is used to identify a kind of action, it does not indicate the
+    type of the object following.
+    """
+    if cls.find("of_action_id") == 0:
+        return True
+
+    # For each vendor, check for vendor specific action
+    for exp in of_g.experimenter_name_to_id:
+        if cls.find("of_action_id_" + exp) == 0:
+            return True
+
+    return False
+
+def class_is_instruction(cls):
+    """
+    Return True if cls_name is an instruction object
+    """
+    if cls.find("of_instruction_id") == 0:
+        return False
+    if cls.find("of_instruction") == 0:
+        return True
+
+    # For each vendor, check for vendor specific action
+    for exp in of_g.experimenter_name_to_id:
+        if cls.find("of_instruction_" + exp) == 0:
+            return True
+
+    return False
+
+def class_is_meter_band(cls):
+    """
+    Return True if cls_name is a meter band object
+    """
+    # meter_band_stats is not a member of meter_band class hierarchy
+    if cls.find("of_meter_band_stats") == 0:
+        return False
+    if cls.find("of_meter_band") == 0:
+        return True
+    return False
+
+def class_is_hello_elem(cls):
+    """
+    Return True if cls_name is a hello element object
+    """
+    if cls.find("of_hello_elem") == 0:
+        return True
+    return False
+
+def class_is_queue_prop(cls):
+    """
+    Return True if cls_name is a queue_prop object
+    """
+    if cls.find("of_queue_prop") == 0:
+        return True
+
+    # For each vendor, check for vendor specific action
+    for exp in of_g.experimenter_name_to_id:
+        if cls.find("of_queue_prop_" + exp) == 0:
+            return True
+
+    return False
+
+def class_is_table_feature_prop(cls):
+    """
+    Return True if cls_name is a table feature property object
+    """
+    if cls.find("of_table_feature_prop") == 0:
+        return True
+    return False
+
+def class_is_stats_message(cls):
+    """
+    Return True if cls_name is a stats message object based on info in unified
+    """
+
+    return "stats_type" in of_g.unified[cls]["union"]
+
+def class_is_list(cls):
+    """
+    Return True if cls_name is a list object
+    """
+    return (cls.find("of_list_") == 0)
+
+def class_is_bsn_tlv(cls):
+    """
+    Return True if cls_name is a BSN TLV object
+    """
+    if cls.find("of_bsn_tlv") == 0:
+        return True
+    return False
+
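
The class_is_* predicates above are simple name/prefix tests on unified class names; a generator would typically use them to bucket of_g.standard_class_order into object families. An illustrative sketch (the bucket names are not part of the generator, and it assumes of_g.unified has been loaded by the frontend):

    def classify(cls):
        # The more specific tests come first, since e.g. actions also
        # satisfy the TLV16 prefix test.
        if class_is_message(cls):
            return "message"
        if class_is_oxm(cls):
            return "oxm"
        if class_is_action(cls):
            return "action"
        if class_is_tlv16(cls):
            return "tlv16"
        if class_is_list(cls):
            return "list"
        return "other"

    # e.g. classify("of_oxm_ipv4_src")  -> "oxm"
    #      classify("of_action_output") -> "action"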
+def type_is_of_object(m_type):
+    """
+    Return True if m_type is an OF object type
+    """
+    # Remove _t from the type id and see if key for unified class
+    if m_type[-2:] == "_t":
+        m_type = m_type[:-2]
+    return m_type in of_g.unified
+
+def list_to_entry_type(cls):
+    """
+    Return the entry type for a list
+    """
+    slen = len("of_list_")
+    return "of_" + cls[slen:]
+
+def type_to_short_name(m_type):
+    if m_type in of_g.of_base_types:
+        tname = of_g.of_base_types[m_type]["short_name"]
+    elif m_type in of_g.of_mixed_types:
+        tname = of_g.of_mixed_types[m_type]["short_name"]
+    else:
+        tname = "unknown"
+    return tname
+
+def type_to_name_type(cls, member_name):
+    """
+    Generate the root name of a member for accessor functions, etc
+    @param cls The class name
+    @param member_name The member name
+    """
+    members = of_g.unified[cls]["union"]
+    if not member_name in members:
+        debug("Error:  %s is not in class %s for acc_name defn" %
+              (member_name, cls))
+        sys.exit(1)
+
+    mem = members[member_name]
+    m_type = mem["m_type"]
+    id = mem["memid"]
+    tname = type_to_short_name(m_type)
+
+    return "o%d_m%d_%s" % (of_g.unified[cls]["object_id"], id, tname)
+
+
+def member_to_index(m_name, members):
+    """
+    Given a member name, return the index in the members dict
+    @param m_name The name of the data member to search for
+    @param members The dict of members
+    @return Index if found, -1 not found
+
+    Note we could generate an index when processing the original input
+    """
+    count = 0
+    for d in members:
+        if d["name"] == m_name:
+            return count
+        count += 1
+    return -1
+
+def member_base_type(cls, m_name):
+    """
+    Map a member to its of_ type
+    @param cls The class name
+    @param m_name The name of the member being gotten
+    @return The of_ type of the member
+    """
+    rv = of_g.unified[cls]["union"][m_name]["m_type"]
+    if rv[-2:] == "_t":
+        return rv
+    return rv + "_t"
+
+def member_type_is_octets(cls, m_name):
+    return member_base_type(cls, m_name) == "of_octets_t"
+
+def h_file_to_define(name):
+    """
+    Convert a .h file name to the define used for the header
+    """
+    h_name = name[:-2].upper()
+    h_name = "_" + h_name + "_H_"
+    return h_name
+
+def type_to_cof_type(m_type):
+    if m_type in of_g.of_base_types:
+        if "cof_type" in of_g.of_base_types[m_type]:
+            return of_g.of_base_types[m_type]["cof_type"]
+    return m_type
+
+
+def member_is_scalar(cls, m_name):
+    return of_g.unified[cls]["union"][m_name]["m_type"] in of_g.of_scalar_types
+
+def type_is_scalar(m_type):
+    return m_type in of_g.of_scalar_types
+
+def skip_member_name(name):
+    return name.find("pad") == 0 or name in of_g.skip_members
+
+def enum_name(cls):
+    """
+    Return the name used for an enum identifier for the given class
+    @param cls The class name
+    """
+    return cls.upper()
+
+def class_in_version(cls, ver):
+    """
+    Return boolean indicating if cls is defined for wire version ver
+    """
+
+    return (cls, ver) in of_g.base_length
+
+def instance_to_class(instance, parent):
+    """
+    Return the name of the class for an instance of inheritance type parent
+    """
+    return parent + "_" + instance
+
+def sub_class_to_var_name(cls):
+    """
+    Given a subclass name like of_action_output, generate the
+    name of a variable like 'output'
+    @param cls The class name
+    """
+    pass
+
+def class_is_var_len(cls, version):
+    # Match is special case.  Only version 1.2 (wire version 3) is var
+    if cls == "of_match":
+        return version == 3
+
+    return not (cls, version) in of_g.is_fixed_length
+
+def base_type_to_length(base_type, version):
+    if base_type + "_t" in of_g.of_base_types:
+        inst_len = of_g.of_base_types[base_type + "_t"]["bytes"]
+    else:
+        inst_len = of_g.base_length[(base_type, version)]
+    return inst_len
+
+def version_to_name(version):
+    """
+    Convert an integer version to the C macro name
+    """
+    return "OF_" + of_g.version_names[version]
+
+##
+# Is class a flow modify of some sort?
+
+def cls_is_flow_mod(cls):
+    return cls in ["of_flow_mod", "of_flow_modify", "of_flow_add", "of_flow_delete",
+                   "of_flow_modify_strict", "of_flow_delete_strict"]
+
+
+def all_member_types_get(cls, version):
+    """
+    Get the members and list of types for members of a given class
+    @param cls The class name to process
+    @param version The version for the class
+    """
+    member_types = []
+
+    if not version in of_g.unified[cls]:
+        return ([], [])
+
+    if "use_version" in of_g.unified[cls][version]:
+        v = of_g.unified[cls][version]["use_version"]
+        members = of_g.unified[cls][v]["members"]
+    else:
+        members = of_g.unified[cls][version]["members"]
+    # Accumulate variables that are supported
+    for member in members:
+        m_type = member["m_type"]
+        m_name = member["name"]
+        if skip_member_name(m_name):
+            continue
+        if not m_type in member_types:
+            member_types.append(m_type)
+
+    return (members, member_types)
+
+def list_name_extract(list_type):
+    """
+    Return the base name for a list object of the given type
+    @param list_type The type of the list as appears in the input,
+    for example list(of_port_desc_t).
+    @return A pair, (list-name, base-type) where list-name is the
+    base name for the list, for example of_list_port_desc, and base-type
+    is the type of list elements like of_port_desc_t
+    """
+    base_type = list_type[5:-1]
+    list_name = base_type
+    if list_name.find("of_") == 0:
+        list_name = list_name[3:]
+    if list_name[-2:] == "_t":
+        list_name = list_name[:-2]
+    list_name = "of_list_" + list_name
+    return (list_name, base_type)
+
+def version_to_name(version):
+    """
+    Convert an integer version to the C macro name
+    """
+    return "OF_" + of_g.version_names[version]
+
+def gen_c_copy_license(out):
+    """
+    Generate the top comments for copyright and license
+    """
+    import c_gen.util
+    c_gen.util.render_template(out, '_copyright.c')
+
+def accessor_returns_error(a_type, m_type):
+    is_var_len = (not type_is_scalar(m_type)) and \
+        [x for x in of_g.of_version_range if class_is_var_len(m_type[:-2], x)] != []
+    if a_type == "set" and is_var_len:
+        return True
+    elif m_type == "of_match_t":
+        return True
+    else:
+        return False
+
+def render_template(out, name, path, context, prefix = None):
+    """
+    Render a template using tenjin.
+    out: a file-like object
+    name: name of the template
+    path: array of directories to search for the template
+    context: dictionary of variables to pass to the template
+    prefix: optional prefix to use for embedding (for other languages than python)
+    """
+    pp = [ tenjin.PrefixedLinePreprocessor(prefix=prefix) if prefix else tenjin.PrefixedLinePreprocessor() ] # support "::" syntax
+    template_globals = { "to_str": str, "escape": str } # disable HTML escaping
+    engine = TemplateEngine(path=path, pp=pp)
+    out.write(engine.render(name, context, template_globals))
+
+def render_static(out, name, path):
+    """
+    Write out a static template.
+    out: a file-like object
+    name: name of the template
+    path: array of directories to search for the template
+    """
+    # Reuse the tenjin logic for finding the template
+    template_filename = tenjin.FileSystemLoader().find(name, path)
+    if not template_filename:
+        raise ValueError("template %s not found" % name)
+    with open(template_filename) as infile:
+        out.write(infile.read())
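
render_static simply locates a template on the search path and copies it out unchanged; a minimal usage sketch (the path and template name are illustrative, mirroring the c_gen/templates files elsewhere in this change):

    import sys
    # Copies c_gen/templates/_copyright.c to stdout if it is found on the path.
    render_static(sys.stdout, "_copyright.c", ["c_gen/templates"])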
diff --git a/loxi_front_end/match.py b/c_gen/match.py
similarity index 81%
rename from loxi_front_end/match.py
rename to c_gen/match.py
index 4ab3126..611a80d 100644
--- a/loxi_front_end/match.py
+++ b/c_gen/match.py
@@ -30,10 +30,9 @@
 # @fixme This still has lots of C specific code that should be moved into c_gen
 
 import sys
-import of_g
+import c_gen.of_g_legacy as of_g
 from generic_utils import *
-import oxm
-import loxi_utils.loxi_utils as loxi_utils
+import c_gen.loxi_utils_legacy as loxi_utils
 
 #
 # Use 1.2 match semantics for common case
@@ -146,24 +145,7 @@
         takes_mask_in_spec=False,
         order=211,
         ),
-    ipv4_src = dict(
-        name="ipv4_src",
-        m_type="uint32_t",
-        v1_wc_shift=8,
-        print_type="PRIx32",
-        conditions="is_ipv4(match)",
-        takes_mask_in_spec=True,
-        order=300,
-        ),
-    ipv4_dst = dict(
-        name="ipv4_dst",
-        m_type="uint32_t",
-        v1_wc_shift=14,
-        print_type="PRIx32",
-        conditions="is_ipv4(match)",
-        takes_mask_in_spec=True,
-        order=301,
-        ),
+
     ip_dscp = dict(
         name="ip_dscp",
         m_type="uint8_t",
@@ -192,6 +174,24 @@
         takes_mask_in_spec=False,
         order=320,
         ),
+    ipv4_src = dict(
+        name="ipv4_src",
+        m_type="of_ipv4_t",
+        v1_wc_shift=8,
+        print_type="PRIx32",
+        conditions="is_ipv4(match)",
+        takes_mask_in_spec=True,
+        order=330,
+        ),
+    ipv4_dst = dict(
+        name="ipv4_dst",
+        m_type="of_ipv4_t",
+        v1_wc_shift=14,
+        print_type="PRIx32",
+        conditions="is_ipv4(match)",
+        takes_mask_in_spec=True,
+        order=331,
+        ),
 
     tcp_dst = dict(
         name="tcp_dst",
@@ -271,7 +271,7 @@
         print_type="PRIx16",
         conditions="is_arp(match)",
         takes_mask_in_spec=False,
-        order=250,
+        order=450,
         ),
 
     arp_spa = dict(
@@ -280,7 +280,7 @@
         print_type="PRIx32",
         conditions="is_arp(match)",
         takes_mask_in_spec=True,
-        order=251,
+        order=451,
         ),
     arp_tpa = dict(
         name="arp_tpa",
@@ -288,7 +288,7 @@
         print_type="PRIx32",
         conditions="is_arp(match)",
         takes_mask_in_spec=True,
-        order=252,
+        order=452,
         ),
 
     arp_sha = dict(
@@ -297,7 +297,7 @@
         print_type="\"p\"",
         conditions="is_arp(match)",
         takes_mask_in_spec=False,
-        order=253,
+        order=453,
         ),
     arp_tha = dict(
         name="arp_tha",
@@ -305,7 +305,7 @@
         print_type="\"p\"",
         conditions="is_arp(match)",
         takes_mask_in_spec=False,
-        order=254,
+        order=454,
         ),
 
     ipv6_src = dict(
@@ -314,7 +314,7 @@
         print_type="\"p\"",
         conditions="is_ipv6(match)",
         takes_mask_in_spec=True,
-        order=350,
+        order=500,
         ),
     ipv6_dst = dict(
         name="ipv6_dst",
@@ -322,7 +322,7 @@
         print_type="\"p\"",
         conditions="is_ipv6(match)",
         takes_mask_in_spec=True,
-        order=351,
+        order=501,
         ),
 
     ipv6_flabel = dict(
@@ -331,7 +331,7 @@
         print_type="PRIx32",
         conditions="is_ipv6(match)",
         takes_mask_in_spec=False, # Comment in openflow.h says True
-        order=360,
+        order=502,
         ),
 
     icmpv6_type = dict(
@@ -340,7 +340,7 @@
         print_type="PRIx8",
         conditions="is_icmp_v6(match)",
         takes_mask_in_spec=False,
-        order=440,
+        order=510,
         ),
     icmpv6_code = dict(
         name="icmpv6_code",
@@ -348,7 +348,7 @@
         print_type="PRIx8",
         conditions="is_icmp_v6(match)",
         takes_mask_in_spec=False,
-        order=441,
+        order=511,
         ),
 
     ipv6_nd_target = dict(
@@ -357,7 +357,7 @@
         print_type="\"p\"",
         conditions="", # fixme
         takes_mask_in_spec=False,
-        order=442,
+        order=512,
         ),
 
     ipv6_nd_sll = dict(
@@ -366,7 +366,7 @@
         print_type="\"p\"",
         conditions="", # fixme
         takes_mask_in_spec=False,
-        order=443,
+        order=520,
         ),
     ipv6_nd_tll = dict(
         name="ipv6_nd_tll",
@@ -374,7 +374,7 @@
         print_type="\"p\"",
         conditions="", # fixme
         takes_mask_in_spec=False,
-        order=444,
+        order=521,
         ),
 
     mpls_label = dict(
@@ -384,7 +384,7 @@
         print_type="PRIx32",
         conditions="",
         takes_mask_in_spec=False,
-        order=500,
+        order=600,
         ),
     mpls_tc = dict(
         name="mpls_tc",
@@ -393,7 +393,71 @@
         print_type="PRIx8",
         conditions="",
         takes_mask_in_spec=False,
-        order=501,
+        order=601,
+        ),
+
+    bsn_in_ports_128 = dict(
+        name="bsn_in_ports_128",
+        m_type="of_bitmap_128_t",
+        v2_wc_shift=9,
+        print_type="p",
+        conditions="",
+        takes_mask_in_spec=True,
+        order=1000,
+        ),
+
+    bsn_lag_id = dict(
+        name="bsn_lag_id",
+        m_type="uint32_t",
+        print_type="PRIu32",
+        conditions="",
+        takes_mask_in_spec=False,
+        order=1001,
+        ),
+
+    bsn_vrf = dict(
+        name="bsn_vrf",
+        m_type="uint32_t",
+        print_type="PRIu32",
+        conditions="",
+        takes_mask_in_spec=False,
+        order=1002,
+        ),
+
+    bsn_global_vrf_allowed = dict(
+        name="bsn_global_vrf_allowed",
+        m_type="uint8_t",
+        print_type="PRIu8",
+        conditions="",
+        takes_mask_in_spec=False,
+        order=1003,
+        ),
+
+    bsn_l3_interface_class_id = dict(
+        name="bsn_l3_interface_class_id",
+        m_type="uint32_t",
+        print_type="PRIu32",
+        conditions="",
+        takes_mask_in_spec=True,
+        order=1003,
+        ),
+
+    bsn_l3_src_class_id = dict(
+        name="bsn_l3_src_class_id",
+        m_type="uint32_t",
+        print_type="PRIu32",
+        conditions="",
+        takes_mask_in_spec=True,
+        order=1004,
+        ),
+
+    bsn_l3_dst_class_id = dict(
+        name="bsn_l3_dst_class_id",
+        m_type="uint32_t",
+        print_type="PRIu32",
+        conditions="",
+        takes_mask_in_spec=True,
+        order=1005,
         ),
 )
 
@@ -465,24 +529,30 @@
                 print "Key %s not found in match struct, v %s" % (key, match_v)
                 sys.exit(1)
 
-    # Check oxm list and the list above
-    for key in oxm.oxm_types:
+    # Generate list of OXM names from the unified classes
+    oxm_names = [x[7:] for x in of_g.unified.keys() if
+                 x.startswith('of_oxm_') and
+                 x.find('masked') < 0 and
+                 x.find('header') < 0]
+
+    # Check that all OXMs are in the match members
+    for key in oxm_names:
         if not key in of_match_members:
             if not (key.find("_masked") > 0):
-                debug("Key %s in oxm.oxm_types, not of_match_members" % key)
+                debug("Key %s in OXM, not of_match_members" % key)
                 sys.exit(1)
             if not key[:-7] in of_match_members:
-                debug("Key %s in oxm.oxm_types, but %s not in of_match_members"
+                debug("Key %s in OXM, but %s not in of_match_members"
                       % (key, key[:-7]))
                 sys.exit(1)
 
+    # Check that all match members are in the OXMs
     for key in of_match_members:
-        if not key in oxm.oxm_types:
-            debug("Key %s in of_match_members, not in oxm.oxm_types" % key)
+        if not key in oxm_names:
+            debug("Key %s in of_match_members, not in OXM" % key)
             sys.exit(1)
-        if of_match_members[key]["m_type"] != oxm.oxm_types[key]:
+        oxm_type = of_g.unified['of_oxm_%s' % key]['union']['value']['m_type']
+        if of_match_members[key]["m_type"] != oxm_type:
             debug("Type mismatch for key %s in oxm data: %s vs %s" %
-                  (key, of_match_members[key]["m_type"], oxm.oxm_types[key]))
+                  (key, of_match_members[key]["m_type"], oxm_type))
             sys.exit(1)
-
-
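
The check above now derives the OXM names directly from the unified class list instead of a separate oxm module, stripping the of_oxm_ prefix and ignoring the masked/header variants. A small sketch of that derivation with hypothetical class names:

    # Hypothetical stand-in for of_g.unified.keys(); the filter mirrors the code above.
    unified_keys = ["of_oxm_ipv4_src", "of_oxm_ipv4_src_masked",
                    "of_oxm_header", "of_flow_add"]
    oxm_names = [x[7:] for x in unified_keys
                 if x.startswith('of_oxm_') and
                    x.find('masked') < 0 and
                    x.find('header') < 0]
    # -> ['ipv4_src']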
diff --git a/of_g.py b/c_gen/of_g_legacy.py
similarity index 66%
rename from of_g.py
rename to c_gen/of_g_legacy.py
index 6c8144f..1fadba0 100644
--- a/of_g.py
+++ b/c_gen/of_g_legacy.py
@@ -32,7 +32,6 @@
 #
 
 import sys
-from optparse import OptionParser
 # @fixme Replace with argparse
 
 ################################################################
@@ -48,98 +47,14 @@
 wire_ver_map = {}
 
 ##
-# Command line options
-options = {}
-
-##
-# Command line arguments
-args = []
-
-##@var config_default
-# The default configuration dictionary for LOXI code generation
-options_default = {
-    "lang"               : "c",
-    "version-list"       : "1.0 1.1 1.2 1.3",
-    "install-dir"        : "loxi_output",
-}
-
-##
 # The list of wire versions which are to be supported
 target_version_list = []
 
-def lang_normalize(lang):
-    """
-    Normalize the representation of the language 
-    """
-    return lang.lower()
-
-def version_list_normalize(vlist):
-    """
-    Normalize the version list and return as an array
-    """
-    out_list = []
-    # @fixme Map to OF version references
-    if vlist.find(',') > 0:
-        vlist = vlist.split(',')
-    else:
-        vlist = vlist.split()
-    vlist.sort()
-    for ver in vlist:
-        try:
-            out_list.append(of_param_version_map[ver])
-        except KeyError:
-            sys.stderr.write("Bad version input, %s" % str(ver))
-            sys.exit(1)
-
-    return out_list
-
-def process_commandline(default_vals=options_default):
-    """
-    Set up the options dictionary
-
-    @param cfg_dflt The default configuration dictionary
-    @return A pair (options, args) as per parser return
-    """
-    global options
-    global args
-    global target_version_list
-
-    parser = OptionParser(version="%prog 0.1")
-
-    #@todo Add options via dictionary
-    parser.add_option("--list-files", action="store_true", default=False,
-                      help="List output files generated")
-    parser.add_option("-l", "--lang", "--language",
-                      default=default_vals["lang"],
-                      help="Select the target language: c, python")
-    parser.add_option("-i", "--install-dir",
-                      default=default_vals["install-dir"],
-                      help="Directory to install generated files to (default %s)" % default_vals["install-dir"])
-    parser.add_option("-v", "--version-list", 
-                      default=default_vals["version-list"],
-                      help="Specify the versions to target as 1.0 1.1 etc")
-
-    (options, args) = parser.parse_args()
-
-    options.lang = lang_normalize(options.lang)
-    target_version_list = version_list_normalize(options.version_list)
-    target_version_list.sort()
-    return (options, args)
-
 ##
 # The dictionary of config variables related to code
 #
-# @param gen_unified_fns  Boolean; Generate top level function definitions for
-# accessors which are independent of the version; the alternative is to only 
-# use the function pointers in the class definitions.  These functions support
-# better inlining optimizations.
-#
-# @param gen_fn_ptrs Boolean; Generate the functions pointed to by pointer
-# in the class (struct) definitions; the alternative is to only use the
-# unified (use_) functions
-#
 # @param use_obj_id  Use object IDs in struct defns   CURRENTLY NOT SUPPORTED
-# 
+#
 # @param return_base_types For 'get' accessors, return values when possible.
 # Otherwise all values are returned thru a call by variable parameter
 #
@@ -152,30 +67,17 @@
 # @param encode_typedefs Use object and member IDs (rather than names)
 # when generating the names used for accessor function typedefs
 #
-# @param get_returns One of "error", "value", or "void"; 
+# @param get_returns One of "error", "value", or "void";
 # CURRENTLY ONLY "error" IS SUPPORTED.  "error" means
 # all get operations return an error code.  "value" means return a base_type
 # value when possible or void if not.  "void" means always return void
 # and use a call-by-variable parameter
 #
 
-# @fixme These are still very C specific and should probably either
-# go into lang_c.py or be swallowed by command line option parsing
-code_gen_config = dict(
-    gen_unified_fns=True,
-#    gen_fn_ptrs=True,  # WARNING: Haven't tested with this in a while
-    gen_fn_ptrs=False,
-    use_obj_id=False,
-    use_static_inlines=False,
-    copy_semantics="read",  # Only read implemented: read, write, grow
-    encoded_typedefs=False,
-    get_returns="error",   # Only error implemented; error, value, void
-)
-
 ## These members do not get normal accessors
 
-skip_members = ["version", "type", "length", "stats_type", "len",
-                "type_len", "actions_len", "_command"]
+skip_members = ["version", "type", "length", "err_type", "stats_type", "len",
+                "type_len", "actions_len", "_command", "command", "key_length"]
 
 ## Some OpenFlow string length constants
 #
@@ -200,8 +102,8 @@
 # the value is the name of the type to use for that version
 #
 # This is the map between the external type (like of_port_no_t)
-# which is used by customers of this code and the internal 
-# datatypes (like uint16_t) that appear on the wire for a 
+# which is used by customers of this code and the internal
+# datatypes (like uint16_t) that appear on the wire for a
 # particular version.
 #
 of_mixed_types = dict(
@@ -219,6 +121,13 @@
         4: "of_port_desc_t",
         "short_name":"port_desc"
         },
+    of_bsn_vport_t = {
+        1: "of_bsn_vport_t",
+        2: "of_bsn_vport_t",
+        3: "of_bsn_vport_t",
+        4: "of_bsn_vport_t",
+        "short_name":"bsn_vport"
+        },
     of_fm_cmd_t = { # Flow mod command went from u16 to u8
         1: "uint16_t",
         2: "uint8_t",
@@ -273,6 +182,7 @@
 #    of_counter_t = dict(bytes=8, to_w="u64_hton", from_w="u64_ntoh", use_as_rv=1,
 #                    short_name="counter"),
     of_mac_addr_t = dict(bytes=6, short_name="mac"),
+    of_ipv4_t = dict(bytes=4, short_name="ipv4"),
     of_ipv6_t = dict(bytes=16, short_name="ipv6"),
     of_port_name_t = dict(bytes=ofp_constants["OF_MAX_PORT_NAME_LEN"],
                           short_name="port_name"),
@@ -282,67 +192,28 @@
                          short_name="desc_str"),
     of_serial_num_t = dict(bytes=ofp_constants["OF_SERIAL_NUM_LEN"],
                            short_name="ser_num"),
-    of_match_v1_t = dict(bytes=40, to_w="match_v1_hton", 
-                         from_w="match_v1_ntoh", 
+    of_match_v1_t = dict(bytes=40, to_w="match_v1_hton",
+                         from_w="match_v1_ntoh",
                          short_name="match_v1"),
-    of_match_v2_t = dict(bytes=88, to_w="match_v2_hton", 
-                         from_w="match_v2_ntoh", 
+    of_match_v2_t = dict(bytes=88, to_w="match_v2_hton",
+                         from_w="match_v2_ntoh",
                          short_name="match_v2"),
-    of_match_v3_t = dict(bytes=-1, to_w="match_v3_hton", 
-                         from_w="match_v3_ntoh", 
+    of_match_v3_t = dict(bytes=-1, to_w="match_v3_hton",
+                         from_w="match_v3_ntoh",
                          short_name="match_v3"),
-#    of_match_v4_t = dict(bytes=-1, to_w="match_v4_hton", 
-#                         from_w="match_v4_ntoh", 
+#    of_match_v4_t = dict(bytes=-1, to_w="match_v4_hton",
+#                         from_w="match_v4_ntoh",
 #                         short_name="match_v4"),
-    of_octets_t = dict(bytes=-1, short_name="octets")
+    of_octets_t = dict(bytes=-1, short_name="octets"),
+    of_bitmap_128_t = dict(bytes=16, short_name="bitmap_128"),
+    of_checksum_128_t = dict(bytes=16, short_name="checksum_128"),
 )
 
 of_scalar_types = ["char", "uint8_t", "uint16_t", "uint32_t", "uint64_t",
                    "of_port_no_t", "of_fm_cmd_t", "of_wc_bmap_t",
                    "of_match_bmap_t", "of_port_name_t", "of_table_name_t",
-                   "of_desc_str_t", "of_serial_num_t", "of_mac_addr_t", 
-                   "of_ipv6_t"]
-
-base_object_members = """\
-    /* The control block for the underlying data buffer */
-    of_wire_object_t wire_object;
-    /* The LOCI type enum value of the object */
-    of_object_id_t object_id;
-
-    /*
-     * Objects need to track their "parent" so that updates to the
-     * object that affect its length can be pushed to the parent.
-     * Treat as private.
-     */
-    of_object_t *parent;
-
-    /*
-     * Not all objects have length and version on the wire so we keep
-     * them here.  NOTE: Infrastructure manages length and version.
-     * Treat length as private and version as read only.
-     */
-    int length;
-    of_version_t version;
-
-    /*
-     * Many objects have a length and/or type represented in the wire buffer
-     * These accessors get and set those value when present.  Treat as private.
-     */
-    of_wire_length_get_f wire_length_get;
-    of_wire_length_set_f wire_length_set;
-    of_wire_type_get_f wire_type_get;
-    of_wire_type_set_f wire_type_set;
-
-    of_object_track_info_t track_info;
-
-    /*
-     * Metadata available for applications.  Ensure 8-byte alignment, but
-     * that buffer is at least as large as requested.  This data is not used
-     * or inspected by LOCI.
-     */
-    uint64_t metadata[(OF_OBJECT_METADATA_BYTES + 7) / 8];
-"""
-
+                   "of_desc_str_t", "of_serial_num_t", "of_mac_addr_t",
+                   "of_ipv6_t", "of_ipv4_t", "of_bitmap_128_t", "of_checksum_128_t"]
 
 ##
 # LOXI identifiers
@@ -394,7 +265,13 @@
 ## Map from class, wire_version to size of fixed part of class
 base_length = {}
 
-## Boolean indication of variable length, per class, wire_version, 
+## Map from class, wire_version to size of variable-offset, fixed length part of class
+extra_length = {
+    ("of_packet_in", 3): 2,
+    ("of_packet_in", 4): 2,
+}
+
+## Boolean indication of variable length, per class, wire_version,
 is_fixed_length = set()
 
 ## The global object ID counter
@@ -467,7 +344,7 @@
 #
 # Experimenters, vendors, extensions
 #
-# Although the term "experimenter" is used for identifying 
+# Although the term "experimenter" is used for identifying
 # external extension definitions, we generally use the term
 # extension when referring to the messages or objects themselves.
 #
@@ -506,20 +383,3 @@
 
 loxigen_dbg_file = sys.stdout
 loxigen_log_file = sys.stdout
-
-################################################################
-#
-# Internal representation
-#
-################################################################
-
-class OFInput(object):
-    """
-    A single LOXI input file.
-    """
-
-    def __init__(self):
-        self.wire_versions = set()
-        self.classes = {}
-        self.ordered_classes = []
-        self.enums = {}
diff --git a/py_gen/templates/_pack_packet_out.py b/c_gen/templates/_copyright.c
similarity index 67%
copy from py_gen/templates/_pack_packet_out.py
copy to c_gen/templates/_copyright.c
index ad8b827..4d38f6d 100644
--- a/py_gen/templates/_pack_packet_out.py
+++ b/c_gen/templates/_copyright.c
@@ -25,15 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-        packed.append(struct.pack("!B", self.version))
-        packed.append(struct.pack("!B", self.type))
-        packed.append(struct.pack("!H", 0)) # placeholder for length at index 3
-        packed.append(struct.pack("!L", self.xid))
-        packed.append(struct.pack("!L", self.buffer_id))
-        packed.append(struct.pack("!H", self.in_port))
-        packed_actions = "".join([x.pack() for x in self.actions])
-        packed.append(struct.pack("!H", len(packed_actions)))
-        packed.append(packed_actions)
-        packed.append(self.data)
-        length = sum([len(x) for x in packed])
-        packed[2] = struct.pack("!H", length)
+/* Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University */
+/* Copyright (c) 2011, 2012 Open Networking Foundation */
+/* Copyright (c) 2012, 2013 Big Switch Networks, Inc. */
+/* See the file LICENSE.loci which should have been included in the source distribution */
diff --git a/py_gen/templates/_pack_packet_out.py b/c_gen/templates/_pragmas.c
similarity index 67%
copy from py_gen/templates/_pack_packet_out.py
copy to c_gen/templates/_pragmas.c
index ad8b827..cbdc350 100644
--- a/py_gen/templates/_pack_packet_out.py
+++ b/c_gen/templates/_pragmas.c
@@ -25,15 +25,26 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-        packed.append(struct.pack("!B", self.version))
-        packed.append(struct.pack("!B", self.type))
-        packed.append(struct.pack("!H", 0)) # placeholder for length at index 3
-        packed.append(struct.pack("!L", self.xid))
-        packed.append(struct.pack("!L", self.buffer_id))
-        packed.append(struct.pack("!H", self.in_port))
-        packed_actions = "".join([x.pack() for x in self.actions])
-        packed.append(struct.pack("!H", len(packed_actions)))
-        packed.append(packed_actions)
-        packed.append(self.data)
-        length = sum([len(x) for x in packed])
-        packed[2] = struct.pack("!H", length)
+#ifdef __GNUC__
+
+#ifdef __linux__
+/* glibc */
+#include <features.h>
+#else
+/* NetBSD etc */
+#include <sys/cdefs.h>
+#ifdef __GNUC_PREREQ__
+#define __GNUC_PREREQ __GNUC_PREREQ__
+#endif
+#endif
+
+#ifndef __GNUC_PREREQ
+/* fallback */
+#define __GNUC_PREREQ(maj, min) 0
+#endif
+
+#if __GNUC_PREREQ(4,6)
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#endif
+
+#endif
diff --git a/py_gen/templates/common.py b/c_gen/templates/_push_wire_types.c
similarity index 60%
rename from py_gen/templates/common.py
rename to c_gen/templates/_push_wire_types.c
index c9af309..5b16e24 100644
--- a/py_gen/templates/common.py
+++ b/c_gen/templates/_push_wire_types.c
@@ -25,37 +25,31 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-:: include('_copyright.py')
-
-:: include('_autogen.py')
-
-import sys
-import struct
-import action
-import const
-import util
-
-# HACK make this module visible as 'common' to simplify code generation
-common = sys.modules[__name__]
-
-def unpack_list_flow_stats_entry(buf):
-    return util.unpack_list(flow_stats_entry.unpack, "!H", buf)
-
-def unpack_list_queue_prop(buf):
-    def deserializer(buf):
-        type, = struct.unpack_from("!H", buf)
-        if type == const.OFPQT_MIN_RATE:
-            return queue_prop_min_rate.unpack(buf)
-        else:
-            raise loxi.ProtocolError("unknown queue prop %d" % type)
-    return util.unpack_list(deserializer, "!2xH", buf)
-
-def unpack_list_packet_queue(buf):
-    return util.unpack_list(packet_queue.unpack, "!4xH", buf)
-
-:: for ofclass in ofclasses:
-:: include('_ofclass.py', ofclass=ofclass, superclass="object")
-
+static void
+${data.class_name}_push_wire_types(of_object_t *obj)
+{
+    unsigned char *buf = OF_OBJECT_BUFFER_INDEX(obj, 0);
+    switch (obj->version) {
+:: for ms, versions in data.versioned_type_members:
+:: for version in versions:
+    case ${version.constant_version(prefix='OF_VERSION_')}:
 :: #endfor
-
-match = match_v1
+:: for m in ms:
+:: if m.length == 1:
+        *(uint8_t *)(buf + ${m.offset}) = ${m.value}; /* ${m.name} */
+:: elif m.length == 2:
+        *(uint16_t *)(buf + ${m.offset}) = htobe16(${m.value}); /* ${m.name} */
+:: elif m.length == 4:
+        *(uint32_t *)(buf + ${m.offset}) = htobe32(${m.value}); /* ${m.name} */
+:: elif m.length == 8:
+        *(uint64_t *)(buf + ${m.offset}) = htobe64(${m.value}); /* ${m.name} */
+:: else:
+:: raise Exception("unsupported push_wire_types length %d" % m.length)
+:: #endif
+:: #endfor
+        break;
+:: #endfor
+    default:
+        UNREACHABLE();
+    }
+}
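
The template above iterates over data.versioned_type_members: each entry pairs a list of fixed members (name, offset, length, constant value) with the wire versions that share those values, and the generated case stores each constant with the byte order matching its width. The data object is assembled on the Python side; a hedged sketch of the shape the template appears to expect (attribute names come from the template, all values are hypothetical):

    from collections import namedtuple

    # Only the attribute names are taken from the template; everything else
    # is illustrative.  In the real generator the version entries are objects
    # exposing constant_version(), not plain strings.
    Member = namedtuple("Member", ["name", "offset", "length", "value"])

    class PushWireTypesData(object):
        def __init__(self, class_name, versioned_type_members):
            self.class_name = class_name
            self.versioned_type_members = versioned_type_members

    data = PushWireTypesData(
        class_name="of_example_msg",
        versioned_type_members=[
            # ([members sharing these values], [wire versions using them])
            ([Member("version", 0, 1, 1), Member("type", 1, 1, 2)],
             ["OF_VERSION_1_0"]),
        ])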
diff --git a/c_gen/templates/bsn_ext.h b/c_gen/templates/bsn_ext.h
index 6afa211..d810ab3 100644
--- a/c_gen/templates/bsn_ext.h
+++ b/c_gen/templates/bsn_ext.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /**
  * BSN OpenFlow extension definition header file
diff --git a/py_gen/templates/_pack_packet_out.py b/c_gen/templates/class.c
similarity index 67%
copy from py_gen/templates/_pack_packet_out.py
copy to c_gen/templates/class.c
index ad8b827..044e03e 100644
--- a/py_gen/templates/_pack_packet_out.py
+++ b/c_gen/templates/class.c
@@ -25,15 +25,13 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-        packed.append(struct.pack("!B", self.version))
-        packed.append(struct.pack("!B", self.type))
-        packed.append(struct.pack("!H", 0)) # placeholder for length at index 3
-        packed.append(struct.pack("!L", self.xid))
-        packed.append(struct.pack("!L", self.buffer_id))
-        packed.append(struct.pack("!H", self.in_port))
-        packed_actions = "".join([x.pack() for x in self.actions])
-        packed.append(struct.pack("!H", len(packed_actions)))
-        packed.append(packed_actions)
-        packed.append(self.data)
-        length = sum([len(x) for x in packed])
-        packed[2] = struct.pack("!H", length)
+:: include('_copyright.c')
+:: include('_pragmas.c')
+
+#include "loci_log.h"
+#include "loci_int.h"
+
+:: if push_wire_types_data:
+:: include("_push_wire_types.c", data=push_wire_types_data)
+
+:: #endif
diff --git a/py_gen/templates/_pack_packet_out.py b/c_gen/templates/loci_classes.h
similarity index 67%
copy from py_gen/templates/_pack_packet_out.py
copy to c_gen/templates/loci_classes.h
index ad8b827..3486055 100644
--- a/py_gen/templates/_pack_packet_out.py
+++ b/c_gen/templates/loci_classes.h
@@ -25,15 +25,11 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-        packed.append(struct.pack("!B", self.version))
-        packed.append(struct.pack("!B", self.type))
-        packed.append(struct.pack("!H", 0)) # placeholder for length at index 3
-        packed.append(struct.pack("!L", self.xid))
-        packed.append(struct.pack("!L", self.buffer_id))
-        packed.append(struct.pack("!H", self.in_port))
-        packed_actions = "".join([x.pack() for x in self.actions])
-        packed.append(struct.pack("!H", len(packed_actions)))
-        packed.append(packed_actions)
-        packed.append(self.data)
-        length = sum([len(x) for x in packed])
-        packed[2] = struct.pack("!H", length)
+:: include('_copyright.c')
+::
+#ifndef __LOCI_CLASSES_H__
+#define __LOCI_CLASSES_H__
+
+${legacy_code}
+
+#endif
diff --git a/c_gen/templates/loci_dox.h b/c_gen/templates/loci_dox.h
index 86c29db..d37a6c1 100644
--- a/c_gen/templates/loci_dox.h
+++ b/c_gen/templates/loci_dox.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /**************************************************************************//**
  * 
diff --git a/c_gen/templates/loci_dump.h b/c_gen/templates/loci_dump.h
index 12bba6e..9cc719c 100644
--- a/c_gen/templates/loci_dump.h
+++ b/c_gen/templates/loci_dump.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2012, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 #if !defined(_LOCI_DUMP_H_)
 #define _LOCI_DUMP_H_
@@ -94,6 +94,10 @@
 int loci_dump_match(loci_writer_f writer, void* cookie, of_match_t *match);
 #define LOCI_DUMP_match(writer, cookie, val) loci_dump_match(writer, cookie, &val)
 
+#define LOCI_DUMP_bitmap_128(writer, cookie, val) writer(cookie, "%" PRIx64 "%" PRIx64, (val).hi, (val).lo)
+
+#define LOCI_DUMP_checksum_128(writer, cookie, val) writer(cookie, "%016" PRIx64 "%016" PRIx64, (val).hi, (val).lo)
+
 /**
  * Generic version for any object
  */
diff --git a/py_gen/templates/_pack_packet_out.py b/c_gen/templates/loci_init_map.c
similarity index 66%
copy from py_gen/templates/_pack_packet_out.py
copy to c_gen/templates/loci_init_map.c
index ad8b827..dc4616d 100644
--- a/py_gen/templates/_pack_packet_out.py
+++ b/c_gen/templates/loci_init_map.c
@@ -24,16 +24,18 @@
 :: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
-::
-        packed.append(struct.pack("!B", self.version))
-        packed.append(struct.pack("!B", self.type))
-        packed.append(struct.pack("!H", 0)) # placeholder for length at index 3
-        packed.append(struct.pack("!L", self.xid))
-        packed.append(struct.pack("!L", self.buffer_id))
-        packed.append(struct.pack("!H", self.in_port))
-        packed_actions = "".join([x.pack() for x in self.actions])
-        packed.append(struct.pack("!H", len(packed_actions)))
-        packed.append(packed_actions)
-        packed.append(self.data)
-        length = sum([len(x) for x in packed])
-        packed[2] = struct.pack("!H", length)
+:: include('_copyright.c')
+#include <loci/loci.h>
+#include <loci/of_object.h>
+#include "loci_log.h"
+#include "loci_int.h"
+
+/**
+ * Map from object ID to type coerce function
+ */
+const of_object_init_f of_object_init_map[] = {
+    (of_object_init_f)NULL,
+:: for cls in classes:
+    (of_object_init_f)${cls}_init,
+:: #endfor
+};
diff --git a/c_gen/templates/loci_int.h b/c_gen/templates/loci_int.h
index ce5ecb0..01ad4a8 100644
--- a/c_gen/templates/loci_int.h
+++ b/c_gen/templates/loci_int.h
@@ -25,7 +25,14 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
+:: import c_gen.of_g_legacy as of_g
+:: flow_mod = of_g.base_length[("of_flow_modify",of_g.VERSION_1_2)]
+:: packet_in = of_g.base_length[("of_packet_in",of_g.VERSION_1_2)]
+:: packet_in_1_3 = of_g.base_length[("of_packet_in",of_g.VERSION_1_3)]
+:: flow_stats = of_g.base_length[("of_flow_stats_entry", of_g.VERSION_1_2)]
+:: match1 = of_g.base_length[("of_match_v1",of_g.VERSION_1_0)]
+:: match2 = of_g.base_length[("of_match_v2",of_g.VERSION_1_1)]
 
 /******************************************************************************
  *
@@ -37,9 +44,254 @@
 #ifndef __LOCI_INT_H__
 #define __LOCI_INT_H__
 
+#include <loci/loci.h>
 
+#ifdef __GNUC__
+#define UNREACHABLE() __builtin_unreachable()
+#else
+#define UNREACHABLE()
+#endif
 
+/****************************************************************
+ * Special case macros for calculating variable lengths and offsets
+ ****************************************************************/
 
+/**
+ * Get a u16 directly from an offset in an object's wire buffer
+ * @param obj An of_object_t object
+ * @param offset Base offset of the uint16 relative to the object
+ *
+ */
 
-#include <loci/loci.h> 
+static inline int
+of_object_u16_get(of_object_t *obj, int offset) {
+    uint16_t val16;
+
+    of_wire_buffer_u16_get(obj->wire_object.wbuf,
+        obj->wire_object.obj_offset + offset, &val16);
+
+    return (int)val16;
+}
+
+/**
+ * Set a u16 directly at an offset in an object's wire buffer
+ * @param obj An of_object_t object
+ * @param offset Base offset of the uint16 relative to the object
+ * @param value The value to store
+ *
+ */
+
+static inline void
+of_object_u16_set(of_object_t *obj, int offset, int value) {
+    uint16_t val16;
+
+    val16 = (uint16_t)value;
+    of_wire_buffer_u16_set(obj->wire_object.wbuf,
+        obj->wire_object.obj_offset + offset, val16);
+}
+
+/**
+ * Get length of an object with a TLV header with uint16_t
+ * @param obj An object with a TLV16 (uint16 type/length) header
+ * @param offset The wire offset of the start of the object
+ *
+ * The length field follows the type field.
+ */
+
+#define _TLV16_LEN(obj, offset) \
+    (of_object_u16_get((of_object_t *)(obj), (offset) + 2))
+
+/**
+ * Get length of an object that is the "rest" of the object
+ * @param obj The containing object
+ * @param offset The wire offset of the start of the object
+ *
+ */
+
+#define _END_LEN(obj, offset) ((obj)->length - (offset))
+
+/**
+ * Offset of the action_len member in a packet-out object
+ */
+
+#define _PACKET_OUT_ACTION_LEN_OFFSET(obj) \
+    (((obj)->version == OF_VERSION_1_0) ? 14 : 16)
+
+/**
+ * Get length of the action list object in a packet_out object
+ * @param obj An object of type of_packet_out
+ */
+
+#define _PACKET_OUT_ACTION_LEN(obj) \
+    (of_object_u16_get((of_object_t *)(obj), _PACKET_OUT_ACTION_LEN_OFFSET(obj)))
+
+/**
+ * Set length of the action list object in a packet_out object
+ * @param obj An object of type of_packet_out
+ */
+
+#define _PACKET_OUT_ACTION_LEN_SET(obj, len) \
+    (of_object_u16_set((of_object_t *)(obj), _PACKET_OUT_ACTION_LEN_OFFSET(obj), len))
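
A hedged usage sketch for the packet-out macros above; the function name is illustrative and it assumes the generated of_packet_out_t carries its total length in ->length, as other macros in this header do:

/* Sketch: read the action list length of a packet_out.  The actions_len
 * field sits 14 bytes into the 1.0 fixed header and 16 bytes for later
 * versions, which is what _PACKET_OUT_ACTION_LEN_OFFSET encodes. */
static int
example_packet_out_actions_len(of_packet_out_t *pkt_out)
{
    int len = _PACKET_OUT_ACTION_LEN(pkt_out);
    /* sanity: the recorded action length cannot exceed the object */
    return (len <= pkt_out->length) ? len : -1;
}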
+
+/*
+ * Match structs in 1.2 come at the end of the fixed length part
+ * of structures.  They add 8 bytes to the minimal length of the
+ * message, but are also variable length.  This means that the
+ * type/length offsets are 8 bytes back from the end of the fixed
+ * length part of the object.  The right way to handle this is to
+ * expose the offset of the match member more explicitly.  For now,
+ * we make the calculation as described here.
+ */
+
+/* 1.2 min length of match is 8 bytes */
+#define _MATCH_MIN_LENGTH_V3 8
+
+/**
+ * The offset of a 1.2 match object relative to fixed length of obj
+ */
+#define _MATCH_OFFSET_V3(fixed_obj_len) \
+    ((fixed_obj_len) - _MATCH_MIN_LENGTH_V3)
+
+/**
+ * The "extra" length beyond the minimal 8 bytes of a match struct
+ * in an object
+ */
+#define _MATCH_EXTRA_LENGTH_V3(obj, fixed_obj_len) \
+    (OF_MATCH_BYTES(_TLV16_LEN(obj, _MATCH_OFFSET_V3(fixed_obj_len))) - \
+     _MATCH_MIN_LENGTH_V3)
+
+/**
+ * The offset of an object following a match object for 1.2
+ */
+#define _OFFSET_FOLLOWING_MATCH_V3(obj, fixed_obj_len) \
+    ((fixed_obj_len) + _MATCH_EXTRA_LENGTH_V3(obj, fixed_obj_len))
+
+/**
+ * Get length of a match object from its wire representation
+ * @param obj An object with a match member
+ * @param match_offset The wire offset of the match object.
+ *
+ * See above; for 1.2, the match length field gives the raw byte count,
+ * but the space the match actually occupies is padded to 64-bit alignment
+ */
+#define _WIRE_MATCH_LEN(obj, match_offset) \
+    (((obj)->version == OF_VERSION_1_0) ? ${match1} : \
+     (((obj)->version == OF_VERSION_1_1) ? ${match2} : \
+      _TLV16_LEN(obj, match_offset)))
+
+#define _WIRE_LEN_MIN 4
+
+/*
+ * Wrapper function for match len.  There are cases where the wire buffer
+ * has not been set with the proper minimum length.  In this case, the
+ * wire match len is interpreted as its minimum length, 4 bytes.
+ */
+
+static inline int
+wire_match_len(of_object_t *obj, int match_offset) {
+    int len;
+
+    len = _WIRE_MATCH_LEN(obj, match_offset);
+
+    return (len == 0) ? _WIRE_LEN_MIN : len;
+}
+
+#define _WIRE_MATCH_PADDED_LEN(obj, match_offset) \
+    OF_MATCH_BYTES(wire_match_len((of_object_t *)(obj), (match_offset)))
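
A worked example of the arithmetic under assumed numbers (a 1.2 object whose fixed part is 56 bytes and whose match TLV reports a raw length of 14):

/*
 * Illustration only; every number below is assumed:
 *   fixed_obj_len                        = 56
 *   _MATCH_OFFSET_V3(56)                 = 56 - 8 = 48   (match TLV starts here)
 *   _TLV16_LEN(obj, 48)                  = 14             (raw match length)
 *   OF_MATCH_BYTES(14)                   = 16             (padded to 64 bits)
 *   _MATCH_EXTRA_LENGTH_V3(obj, 56)      = 16 - 8 = 8
 *   _OFFSET_FOLLOWING_MATCH_V3(obj, 56)  = 56 + 8 = 64
 */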
+
+/**
+ * Macro to calculate variable offset of instructions member in flow mod
+ * @param obj An object of some type of flow modify/add/delete
+ *
+ * Get length of preceding match object and add to fixed length
+ * Applies only to version 1.2
+ */
+
+#define _FLOW_MOD_INSTRUCTIONS_OFFSET(obj) \
+    _OFFSET_FOLLOWING_MATCH_V3(obj, ${flow_mod})
+
+/* The different flavors of flow mod all use the above */
+#define _FLOW_ADD_INSTRUCTIONS_OFFSET(obj) \
+    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
+#define _FLOW_MODIFY_INSTRUCTIONS_OFFSET(obj) \
+    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
+#define _FLOW_MODIFY_STRICT_INSTRUCTIONS_OFFSET(obj) \
+    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
+#define _FLOW_DELETE_INSTRUCTIONS_OFFSET(obj) \
+    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
+#define _FLOW_DELETE_STRICT_INSTRUCTIONS_OFFSET(obj) \
+    _FLOW_MOD_INSTRUCTIONS_OFFSET(obj)
+
+/**
+ * Macro to calculate variable offset of instructions member in flow stats
+ * @param obj An object of type of_flow_stats_entry_t
+ *
+ * Get length of preceding match object and add to fixed length
+ * Applies only to versions 1.2 and 1.3
+ */
+
+#define _FLOW_STATS_ENTRY_INSTRUCTIONS_OFFSET(obj) \
+    _OFFSET_FOLLOWING_MATCH_V3(obj, ${flow_stats})
+
+/**
+ * Macro to calculate variable offset of data (packet) member in packet_in
+ * @param obj An object of type of_packet_in_t
+ *
+ * Get length of preceding match object and add to fixed length
+ * Applies only to versions 1.2 and 1.3
+ * The +2 comes from the 2 bytes of padding between the match and packet data.
+ */
+
+#define _PACKET_IN_DATA_OFFSET(obj) \
+    (_OFFSET_FOLLOWING_MATCH_V3((obj), (obj)->version == OF_VERSION_1_2 ? \
+${packet_in} : ${packet_in_1_3}) + 2)
+
+/**
+ * Macro to calculate variable offset of data (packet) member in packet_out
+ * @param obj An object of type of_packet_out_t
+ *
+ * Find the length in the actions_len variable and add to the fixed len
+ * Applies only to versions 1.2 and 1.3
+ */
+
+#define _PACKET_OUT_DATA_OFFSET(obj) (_PACKET_OUT_ACTION_LEN(obj) + \
+     of_object_fixed_len[(obj)->version][OF_PACKET_OUT])
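
A hedged sketch of locating the packet bytes in a 1.2/1.3 packet_in using the macro above; OF_OBJECT_BUFFER_INDEX is used the same way elsewhere in this diff, and the assumption that the object's total length lives in ->length matches what _END_LEN relies on:

/* Sketch: return a pointer to the payload of a packet_in and its length.
 * Error handling and the pre-1.2 fixed-offset case are omitted. */
static uint8_t *
example_packet_in_data(of_packet_in_t *pkt_in, int *len_out)
{
    int offset = _PACKET_IN_DATA_OFFSET(pkt_in);
    *len_out = pkt_in->length - offset;
    return OF_OBJECT_BUFFER_INDEX(pkt_in, offset);
}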
+
+/**
+ * Macro to map port numbers that changed across versions
+ * @param port The port_no_t variable holding the value
+ * @param ver The OpenFlow version from which the value was extracted
+ */
+#define OF_PORT_NO_VALUE_CHECK(port, ver) \
+    if (((ver) == OF_VERSION_1_0) && ((port) > 0xff00)) (port) += 0xffff0000
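
For illustration, the effect of the port mapping on an assumed 1.0 reserved port value:

/* Sketch: 1.0 special ports live above 0xff00 in a 16-bit space; the macro
 * lifts them into the 32-bit range used by later versions. */
static void
example_port_map(void)
{
    uint32_t port = 0xfff8;                   /* assumed 1.0 special port */
    OF_PORT_NO_VALUE_CHECK(port, OF_VERSION_1_0);
    /* port == 0xfffffff8 after the check */
}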
+
+/**
+ * Macro to detect if an object ID falls in the "flow mod" family of objects
+ * This includes add, modify, modify_strict, delete and delete_strict
+ */
+#define IS_FLOW_MOD_SUBTYPE(object_id)                 \
+    (((object_id) == OF_FLOW_MODIFY) ||                \
+     ((object_id) == OF_FLOW_MODIFY_STRICT) ||         \
+     ((object_id) == OF_FLOW_DELETE) ||                \
+     ((object_id) == OF_FLOW_DELETE_STRICT) ||         \
+     ((object_id) == OF_FLOW_ADD))
+
+/**
+ * Macro to calculate variable offset of value member in of_bsn_gentable_entry_add
+ * @param obj An object of type of_bsn_gentable_entry_add_t
+ */
+
+#define _BSN_GENTABLE_ENTRY_ADD_VALUE_OFFSET(obj) \
+    (of_object_u16_get(obj, 18) + \
+        of_object_fixed_len[(obj)->version][OF_BSN_GENTABLE_ENTRY_ADD])
+
+#define _BSN_GENTABLE_ENTRY_DESC_STATS_ENTRY_VALUE_OFFSET(obj) \
+    (of_object_u16_get(obj, 2) + \
+        of_object_fixed_len[(obj)->version][OF_BSN_GENTABLE_ENTRY_DESC_STATS_ENTRY])
+
+#define _BSN_GENTABLE_ENTRY_STATS_ENTRY_STATS_OFFSET(obj) \
+    (of_object_u16_get(obj, 2) + \
+        of_object_fixed_len[(obj)->version][OF_BSN_GENTABLE_ENTRY_STATS_ENTRY])
+
 #endif /* __LOCI_INT_H__ */
diff --git a/c_gen/templates/loci_log.c b/c_gen/templates/loci_log.c
index cf34eae..2a06811 100644
--- a/c_gen/templates/loci_log.c
+++ b/c_gen/templates/loci_log.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2012, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 #include <stdarg.h>
 
diff --git a/c_gen/templates/loci_log.h b/c_gen/templates/loci_log.h
index 3e22d81..14a68cf 100644
--- a/c_gen/templates/loci_log.h
+++ b/c_gen/templates/loci_log.h
@@ -25,15 +25,11 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2012, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 #if !defined(_LOCI_LOG_H_)
 #define _LOCI_LOG_H_
 
-#include <loci/loci_base.h>
-#include <loci/of_match.h>
-#include <stdio.h>
-
 /* g++ requires this to pick up PRI, etc.
  * See  http://gcc.gnu.org/ml/gcc-help/2006-10/msg00223.html
  */
diff --git a/c_gen/templates/loci_setup_from_add_fns.c b/c_gen/templates/loci_setup_from_add_fns.c
new file mode 100644
index 0000000..5f63d29
--- /dev/null
+++ b/c_gen/templates/loci_setup_from_add_fns.c
@@ -0,0 +1,250 @@
+:: # Copyright 2013, Big Switch Networks, Inc.
+:: #
+:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+:: # the following special exception:
+:: #
+:: # LOXI Exception
+:: #
+:: # As a special exception to the terms of the EPL, you may distribute libraries
+:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+:: #
+:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+:: #
+:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+:: # a copy of the EPL at:
+:: #
+:: # http://www.eclipse.org/legal/epl-v10.html
+:: #
+:: # Unless required by applicable law or agreed to in writing, software
+:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+:: # EPL for the specific language governing permissions and limitations
+:: # under the EPL.
+:: include('_copyright.c')
+#include <loci/loci.h>
+#include <loci/of_object.h>
+#include "loci_log.h"
+#include "loci_int.h"
+
+/* Flow stats entry setup for all versions */
+
+static int
+flow_stats_entry_setup_from_flow_add_common(of_flow_stats_entry_t *obj,
+                                            of_flow_add_t *flow_add,
+                                            of_object_t *effects,
+                                            int entry_match_offset,
+                                            int add_match_offset)
+{
+    int entry_len, add_len;
+    of_wire_buffer_t *wbuf;
+    int abs_offset;
+    int delta;
+    uint16_t val16;
+    uint64_t cookie;
+    of_octets_t match_octets;
+
+    /* Transfer the match underlying object from add to stats entry */
+    wbuf = OF_OBJECT_TO_WBUF(obj);
+    entry_len = _WIRE_MATCH_PADDED_LEN(obj, entry_match_offset);
+    add_len = _WIRE_MATCH_PADDED_LEN(flow_add, add_match_offset);
+
+    match_octets.bytes = add_len;
+    match_octets.data = OF_OBJECT_BUFFER_INDEX(flow_add, add_match_offset);
+
+    /* Copy data into flow entry */
+    abs_offset = OF_OBJECT_ABSOLUTE_OFFSET(obj, entry_match_offset);
+    of_wire_buffer_replace_data(wbuf, abs_offset, entry_len,
+                                match_octets.data, add_len);
+
+    /* Not scalar, update lengths if needed */
+    delta = add_len - entry_len;
+    if (delta != 0) {
+        /* Update parent(s) */
+        of_object_parent_length_update((of_object_t *)obj, delta);
+    }
+
+    of_flow_add_cookie_get(flow_add, &cookie);
+    of_flow_stats_entry_cookie_set(obj, cookie);
+
+    of_flow_add_priority_get(flow_add, &val16);
+    of_flow_stats_entry_priority_set(obj, val16);
+
+    of_flow_add_idle_timeout_get(flow_add, &val16);
+    of_flow_stats_entry_idle_timeout_set(obj, val16);
+
+    of_flow_add_hard_timeout_get(flow_add, &val16);
+    of_flow_stats_entry_hard_timeout_set(obj, val16);
+
+    /* Effects may come from different places */
+    if (effects != NULL) {
+        if (obj->version == OF_VERSION_1_0) {
+            OF_TRY(of_flow_stats_entry_actions_set(obj,
+                (of_list_action_t *)effects));
+        } else {
+            OF_TRY(of_flow_stats_entry_instructions_set(obj,
+                (of_list_instruction_t *)effects));
+        }
+    } else {
+        if (obj->version == OF_VERSION_1_0) {
+            of_list_action_t actions;
+            of_flow_add_actions_bind(flow_add, &actions);
+            OF_TRY(of_flow_stats_entry_actions_set(obj, &actions));
+        } else {
+            of_list_instruction_t instructions;
+            of_flow_add_instructions_bind(flow_add, &instructions);
+            OF_TRY(of_flow_stats_entry_instructions_set(obj, &instructions));
+        }
+    }
+
+    return OF_ERROR_NONE;
+}
+
+/* Flow removed setup for all versions */
+
+static int
+flow_removed_setup_from_flow_add_common(of_flow_removed_t *obj,
+                                        of_flow_add_t *flow_add,
+                                        int removed_match_offset,
+                                        int add_match_offset)
+{
+    int add_len, removed_len;
+    of_wire_buffer_t *wbuf;
+    int abs_offset;
+    int delta;
+    uint16_t val16;
+    uint64_t cookie;
+    of_octets_t match_octets;
+
+    /* Transfer the match underlying object from add to removed obj */
+    wbuf = OF_OBJECT_TO_WBUF(obj);
+    removed_len = _WIRE_MATCH_PADDED_LEN(obj, removed_match_offset);
+    add_len = _WIRE_MATCH_PADDED_LEN(flow_add, add_match_offset);
+
+    match_octets.bytes = add_len;
+    match_octets.data = OF_OBJECT_BUFFER_INDEX(flow_add, add_match_offset);
+
+    /* Copy data into flow removed */
+    abs_offset = OF_OBJECT_ABSOLUTE_OFFSET(obj, removed_match_offset);
+    of_wire_buffer_replace_data(wbuf, abs_offset, removed_len,
+                                match_octets.data, add_len);
+
+    /* Not scalar, update lengths if needed */
+    delta = add_len - removed_len;
+    if (delta != 0) {
+        /* Update parent(s) */
+        of_object_parent_length_update((of_object_t *)obj, delta);
+    }
+
+    of_flow_add_cookie_get(flow_add, &cookie);
+    of_flow_removed_cookie_set(obj, cookie);
+
+    of_flow_add_priority_get(flow_add, &val16);
+    of_flow_removed_priority_set(obj, val16);
+
+    of_flow_add_idle_timeout_get(flow_add, &val16);
+    of_flow_removed_idle_timeout_set(obj, val16);
+
+    if (obj->version >= OF_VERSION_1_2) {
+        of_flow_add_hard_timeout_get(flow_add, &val16);
+        of_flow_removed_hard_timeout_set(obj, val16);
+    }
+
+    return OF_ERROR_NONE;
+}
+
+/* Set up a flow removed message from the original add */
+
+int
+of_flow_removed_setup_from_flow_add(of_flow_removed_t *obj,
+                                    of_flow_add_t *flow_add)
+{
+    switch (obj->version) {
+    case OF_VERSION_1_0:
+        return flow_removed_setup_from_flow_add_common(obj, flow_add,
+                                                       8, 8);
+        break;
+    case OF_VERSION_1_1:
+    case OF_VERSION_1_2:
+    case OF_VERSION_1_3:
+        return flow_removed_setup_from_flow_add_common(obj, flow_add,
+                                                       48, 48);
+        break;
+    default:
+        return OF_ERROR_VERSION;
+        break;
+    }
+
+    return OF_ERROR_NONE;
+}
+
+
+/* Set up a packet in message from the original add */
+
+int
+of_packet_in_setup_from_flow_add(of_packet_in_t *obj,
+                                 of_flow_add_t *flow_add)
+{
+    int add_len, pkt_in_len;
+    of_wire_buffer_t *wbuf;
+    int abs_offset;
+    int delta;
+    const int pkt_in_match_offset = 16;
+    const int add_match_offset = 48;
+    of_octets_t match_octets;
+
+    if (obj->version < OF_VERSION_1_2) {
+        /* Nothing to be done before OF 1.2 */
+        return OF_ERROR_NONE;
+    }
+
+    /* Transfer match struct from flow add to packet in object */
+    wbuf = OF_OBJECT_TO_WBUF(obj);
+    pkt_in_len = _WIRE_MATCH_PADDED_LEN(obj, pkt_in_match_offset);
+    add_len = _WIRE_MATCH_PADDED_LEN(flow_add, add_match_offset);
+
+    match_octets.bytes = add_len;
+    match_octets.data = OF_OBJECT_BUFFER_INDEX(flow_add, add_match_offset);
+
+    /* Copy data into pkt_in msg */
+    abs_offset = OF_OBJECT_ABSOLUTE_OFFSET(obj, pkt_in_match_offset);
+    of_wire_buffer_replace_data(wbuf, abs_offset, pkt_in_len,
+                                match_octets.data, add_len);
+
+    /* Not scalar, update lengths if needed */
+    delta = add_len - pkt_in_len;
+    if (delta != 0) {
+        /* Update parent(s) */
+        of_object_parent_length_update((of_object_t *)obj, delta);
+    }
+
+    return OF_ERROR_NONE;
+}
+
+/* Set up a stats entry from the original add */
+
+int
+of_flow_stats_entry_setup_from_flow_add(of_flow_stats_entry_t *obj,
+                                        of_flow_add_t *flow_add,
+                                        of_object_t *effects)
+{
+    switch (obj->version) {
+    case OF_VERSION_1_0:
+        return flow_stats_entry_setup_from_flow_add_common(obj, flow_add,
+                                                           effects, 4, 8);
+        break;
+    case OF_VERSION_1_1:
+    case OF_VERSION_1_2:
+    case OF_VERSION_1_3:
+        return flow_stats_entry_setup_from_flow_add_common(obj, flow_add,
+                                                           effects, 48, 48);
+        break;
+    default:
+        return OF_ERROR_VERSION;
+    }
+
+    return OF_ERROR_NONE;
+}
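
A hedged sketch of how a switch agent might use these routines, building a flow_removed message from a previously stored flow_add; of_flow_removed_new follows LOCI's generated <class>_new(version) constructor convention seen elsewhere in this diff, of_object_delete appears in the test code, and the wrapper name is hypothetical:

/* Sketch: allocate a flow_removed for the same version as the stored add
 * and copy the match and timeout/priority fields across. */
static of_flow_removed_t *
example_flow_removed_from_add(of_flow_add_t *flow_add)
{
    of_flow_removed_t *removed = of_flow_removed_new(flow_add->version);

    if (removed == NULL) {
        return NULL;
    }
    if (of_flow_removed_setup_from_flow_add(removed, flow_add) != OF_ERROR_NONE) {
        of_object_delete((of_object_t *)removed);
        return NULL;
    }
    return removed;
}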
diff --git a/c_gen/templates/loci_show.h b/c_gen/templates/loci_show.h
index ed7a7d5..e7cc2a8 100644
--- a/c_gen/templates/loci_show.h
+++ b/c_gen/templates/loci_show.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2012, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 #if !defined(_LOCI_SHOW_H_)
 #define _LOCI_SHOW_H_
@@ -74,7 +74,7 @@
  */
 #define LOCI_SHOW_u32_ipv6_flabel(writer, cookie, val)     LOCI_SHOW_u32(writer, cookie, val)
 #define LOCI_SHOW_u8_vlan_pcp(writer, cookie, val)         LOCI_SHOW_u8(writer, cookie, val)
-#define LOCI_SHOW_u32_ipv4_src(writer, cookie, val)        LOCI_SHOW_ipv4(writer, cookie, val)
+#define LOCI_SHOW_ipv4_ipv4_src(writer, cookie, val)        LOCI_SHOW_ipv4(writer, cookie, val)
 #define LOCI_SHOW_ipv6_ipv6_dst(writer, cookie, val)       LOCI_SHOW_ipv6(writer, cookie, val)
 #define LOCI_SHOW_u32_arp_tpa(writer, cookie, val)         LOCI_SHOW_ipv4(writer, cookie, val)
 #define LOCI_SHOW_u8_icmpv6_type(writer, cookie, val)      LOCI_SHOW_u8(writer, cookie, val)
@@ -99,7 +99,7 @@
 #define LOCI_SHOW_u8_ip_ecn(writer, cookie, val)           LOCI_SHOW_u8(writer, cookie, val)
 #define LOCI_SHOW_u16_udp_dst(writer, cookie, val)         LOCI_SHOW_u16(writer, cookie, val)
 #define LOCI_SHOW_port_no_in_phy_port(writer, cookie, val) LOCI_SHOW_port_no(writer, cookie, val)
-#define LOCI_SHOW_u32_ipv4_dst(writer, cookie, val)        LOCI_SHOW_ipv4(writer, cookie, val)
+#define LOCI_SHOW_ipv4_ipv4_dst(writer, cookie, val)        LOCI_SHOW_ipv4(writer, cookie, val)
 #define LOCI_SHOW_mac_eth_src(writer, cookie, val)         LOCI_SHOW_mac(writer, cookie, val)
 #define LOCI_SHOW_u16_udp_src(writer, cookie, val)         LOCI_SHOW_u16(writer, cookie, val)
 #define LOCI_SHOW_mac_ipv6_nd_tll(writer, cookie, val)     LOCI_SHOW_mac(writer, cookie, val)
@@ -109,6 +109,14 @@
 #define LOCI_SHOW_u8_ip_proto(writer, cookie, val)         LOCI_SHOW_u8(writer, cookie, val)
 #define LOCI_SHOW_u64_metadata(writer, cookie, val)        LOCI_SHOW_x64(writer, cookie, val)
 #define LOCI_SHOW_u8_enabled(writer, cookie, val)          LOCI_SHOW_u8(writer, cookie, val)
+#define LOCI_SHOW_u32_vport_no(writer, cookie, val)        LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u32_port_no(writer, cookie, val)         LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u16_ingress_tpid(writer, cookie, val)    LOCI_SHOW_x16(writer, cookie, val)
+#define LOCI_SHOW_u16_egress_tpid(writer, cookie, val)     LOCI_SHOW_x16(writer, cookie, val)
+#define LOCI_SHOW_u16_ingress_vlan_id(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_u16_egress_vlan_id(writer, cookie, val)  LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_u32_enabled(writer, cookie, val)         LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u32_enable(writer, cookie, val)          LOCI_SHOW_u32(writer, cookie, val)
 
 
 
@@ -146,6 +154,7 @@
 #define LOCI_SHOW_string(writer, cookie, val) writer(cookie, "%s", val)
 
 #define LOCI_SHOW_port_name(writer, cookie, val) LOCI_SHOW_string(writer, cookie, val)
+#define LOCI_SHOW_port_name_if_name(writer, cookie, val) LOCI_SHOW_string(writer, cookie, val)
 #define LOCI_SHOW_tab_name(writer, cookie, val) LOCI_SHOW_string(writer, cookie, val)
 #define LOCI_SHOW_desc_str(writer, cookie, val) LOCI_SHOW_string(writer, cookie, val)
 #define LOCI_SHOW_ser_num(writer, cookie, val) LOCI_SHOW_string(writer, cookie, val)
@@ -153,6 +162,10 @@
 int loci_show_match(loci_writer_f writer, void *cookie, of_match_t *match);
 #define LOCI_SHOW_match(writer, cookie, val) loci_show_match(writer, cookie, &val)
 
+#define LOCI_SHOW_bitmap_128(writer, cookie, val) writer(cookie, "%" PRIx64 "%" PRIx64, (val).hi, (val).lo)
+
+#define LOCI_SHOW_checksum_128(writer, cookie, val) writer(cookie, "%016" PRIx64 "%016" PRIx64, (val).hi, (val).lo)
+
 /**
  * Generic version for any object
  */
@@ -183,7 +196,9 @@
 #define LOCI_SHOW_desc_str_sw_desc(writer, cookie, val) LOCI_SHOW_desc_str(writer, cookie, val)
 #define LOCI_SHOW_ser_num_serial_num(writer, cookie, val) LOCI_SHOW_ser_num(writer, cookie, val)
 #define LOCI_SHOW_desc_str_dp_desc(writer, cookie, val) LOCI_SHOW_desc_str(writer, cookie, val)
+#define LOCI_SHOW_desc_str_pipeline(writer, cookie, val) LOCI_SHOW_desc_str(writer, cookie, val)
 #define LOCI_SHOW_octets_data(writer, cookie, val) LOCI_SHOW_octets(writer, cookie, val)
+#define LOCI_SHOW_octets_value(writer, cookie, val) LOCI_SHOW_octets(writer, cookie, val)
 #define LOCI_SHOW_u16_err_type(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
 #define LOCI_SHOW_u16_code(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
 #define LOCI_SHOW_u64_datapath_id(writer, cookie, val) LOCI_SHOW_x64(writer, cookie, val)
@@ -224,7 +239,14 @@
 #define LOCI_SHOW_u32_supported(writer, cookie, val) LOCI_SHOW_x32(writer, cookie, val)
 #define LOCI_SHOW_u32_peer(writer, cookie, val) LOCI_SHOW_x32(writer, cookie, val)
 #define LOCI_SHOW_u64_rx_packets(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
+#define LOCI_SHOW_u64_rx_packets_unicast(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
+#define LOCI_SHOW_u64_rx_packets_multicast(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
+#define LOCI_SHOW_u64_rx_packets_broadcast(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
+#define LOCI_SHOW_u64_uint64_value(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
 #define LOCI_SHOW_u64_tx_packets(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
+#define LOCI_SHOW_u64_tx_packets_unicast(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
+#define LOCI_SHOW_u64_tx_packets_multicast(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
+#define LOCI_SHOW_u64_tx_packets_broadcast(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
 #define LOCI_SHOW_u64_rx_bytes(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
 #define LOCI_SHOW_u64_tx_bytes(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
 #define LOCI_SHOW_u64_rx_dropped(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
@@ -258,8 +280,8 @@
 #define LOCI_SHOW_u64_metadata_mask(writer, cookie, val) LOCI_SHOW_x64(writer, cookie, val)
 #define LOCI_SHOW_mac_eth_src_mask(writer, cookie, val) LOCI_SHOW_mac(writer, cookie, val)
 #define LOCI_SHOW_mac_eth_dst_mask(writer, cookie, val) LOCI_SHOW_mac(writer, cookie, val)
-#define LOCI_SHOW_u32_ipv4_src_mask(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
-#define LOCI_SHOW_u32_ipv4_dst_mask(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
+#define LOCI_SHOW_ipv4_ipv4_src_mask(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
+#define LOCI_SHOW_ipv4_ipv4_dst_mask(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
 #define LOCI_SHOW_u32_curr_speed(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
 #define LOCI_SHOW_u32_max_speed(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
 #define LOCI_SHOW_match_bmap_match(writer, cookie, val) LOCI_SHOW_match_bmap(writer, cookie, val)
@@ -320,10 +342,48 @@
 #define LOCI_SHOW_u32_service(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
 #define LOCI_SHOW_u32_status(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
 #define LOCI_SHOW_u16_subtype(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
-#define LOCI_SHOW_u32_ipv4_addr(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
-#define LOCI_SHOW_u32_ipv4_netmask(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
-
-
-
+#define LOCI_SHOW_ipv4_ipv4_addr(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
+#define LOCI_SHOW_ipv4_ipv4_netmask(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
+#define LOCI_SHOW_u8_l2_table_enable(writer, cookie, val) LOCI_SHOW_u8(writer, cookie, val)
+#define LOCI_SHOW_u16_l2_table_priority(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_ipv4_value(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
+#define LOCI_SHOW_ipv4_value_mask(writer, cookie, val) LOCI_SHOW_ipv4(writer, cookie, val)
+#define LOCI_SHOW_u8_hybrid_enable(writer, cookie, val) LOCI_SHOW_u8(writer, cookie, val)
+#define LOCI_SHOW_u16_hybrid_version(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_bitmap_128_value(writer, cookie, val) LOCI_SHOW_bitmap_128(writer, cookie, val)
+#define LOCI_SHOW_bitmap_128_value_mask(writer, cookie, val) LOCI_SHOW_bitmap_128(writer, cookie, val)
+#define LOCI_SHOW_bitmap_128_bsn_in_ports_128(writer, cookie, val) LOCI_SHOW_bitmap_128(writer, cookie, val)
+#define LOCI_SHOW_u32_timeout_ms(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u32_tx_interval_ms(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u8_slot_num(writer, cookie, val) LOCI_SHOW_u8(writer, cookie, val)
+#define LOCI_SHOW_u32_bsn_lag_id(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u32_bsn_vrf(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u8_bsn_global_vrf_allowed(writer, cookie, val) LOCI_SHOW_u8(writer, cookie, val)
+#define LOCI_SHOW_u32_bsn_l3_interface_class_id(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u32_bsn_l3_src_class_id(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u32_bsn_l3_dst_class_id(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u8_convergence_status(writer, cookie, val) LOCI_SHOW_u8(writer, cookie, val)
+#define LOCI_SHOW_u16_actor_sys_priority(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_mac_actor_sys_mac(writer, cookie, val) LOCI_SHOW_mac(writer, cookie, val)
+#define LOCI_SHOW_u16_actor_port_priority(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_u16_actor_port_num(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_u16_actor_key(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_u16_partner_sys_priority(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_mac_partner_sys_mac(writer, cookie, val) LOCI_SHOW_mac(writer, cookie, val)
+#define LOCI_SHOW_u16_partner_port_priority(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_u16_partner_port_num(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_u16_partner_key(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_u64_time_ms(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
+#define LOCI_SHOW_desc_str_uri(writer, cookie, val) LOCI_SHOW_desc_str(writer, cookie, val)
+#define LOCI_SHOW_u8_state(writer, cookie, val) LOCI_SHOW_u8(writer, cookie, val)
+#define LOCI_SHOW_u16_table_id(writer, cookie, val) LOCI_SHOW_u16(writer, cookie, val)
+#define LOCI_SHOW_u32_deleted_count(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u32_error_count(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_checksum_128_checksum(writer, cookie, val) LOCI_SHOW_checksum_128(writer, cookie, val)
+#define LOCI_SHOW_checksum_128_checksum_mask(writer, cookie, val) LOCI_SHOW_checksum_128(writer, cookie, val)
+#define LOCI_SHOW_u32_buckets_size(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u32_entry_count(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u32_num_aux(writer, cookie, val) LOCI_SHOW_u32(writer, cookie, val)
+#define LOCI_SHOW_u64_checksum(writer, cookie, val) LOCI_SHOW_u64(writer, cookie, val)
 
 #endif /* _LOCI_SHOW_H_ */
diff --git a/c_gen/templates/loci_strings.c b/c_gen/templates/loci_strings.c
new file mode 100644
index 0000000..f546e12
--- /dev/null
+++ b/c_gen/templates/loci_strings.c
@@ -0,0 +1,75 @@
+:: # Copyright 2013, Big Switch Networks, Inc.
+:: #
+:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+:: # the following special exception:
+:: #
+:: # LOXI Exception
+:: #
+:: # As a special exception to the terms of the EPL, you may distribute libraries
+:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+:: #
+:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+:: #
+:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+:: # a copy of the EPL at:
+:: #
+:: # http://www.eclipse.org/legal/epl-v10.html
+:: #
+:: # Unless required by applicable law or agreed to in writing, software
+:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+:: # EPL for the specific language governing permissions and limitations
+:: # under the EPL.
+:: include('_copyright.c')
+#include <loci/loci.h>
+#include <loci/of_object.h>
+#include "loci_log.h"
+#include "loci_int.h"
+
+const char *const of_object_id_str[] = {
+:: for cls in object_id_strs:
+    "${cls}",
+:: #endfor
+};
+
+const char *const of_version_str[] = {
+    "Unknown OpenFlow Version",
+    "OpenFlow-1.0",
+    "OpenFlow-1.1",
+    "OpenFlow-1.2"
+};
+
+const of_mac_addr_t of_mac_addr_all_ones = {
+    {
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+    }
+};
+/* Just to be explicit; static duration vars are init'd to 0 */
+const of_mac_addr_t of_mac_addr_all_zeros = {
+    {
+        0, 0, 0, 0, 0, 0
+    }
+};
+
+const of_ipv6_t of_ipv6_all_ones = {
+    {
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+    }
+};
+/* Just to be explicit; static duration vars are init'd to 0 */
+const of_ipv6_t of_ipv6_all_zeros = {
+    {
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0
+    }
+};
+
+/** @var of_error_strings
+ * The error string map; use abs value to index
+ */
+const char *const of_error_strings[] = { OF_ERROR_STRINGS };
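
Since the comment above says the table is indexed by absolute value, a minimal sketch of mapping a LOCI return code to its string (helper name is hypothetical; assumes error codes are zero or negative):

/* Sketch: index the error string table by absolute value, per the
 * comment above. */
static const char *
example_error_str(int rv)
{
    return of_error_strings[(rv < 0) ? -rv : rv];
}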
diff --git a/c_gen/templates/locitest/Makefile b/c_gen/templates/locitest/Makefile
new file mode 100644
index 0000000..70e57ca
--- /dev/null
+++ b/c_gen/templates/locitest/Makefile
@@ -0,0 +1,22 @@
+LOCITEST_SRCS := $(wildcard src/*.c)
+LOCI_SRCS := $(wildcard ../loci/src/*.c)
+
+LOCITEST_OBJS := $(LOCITEST_SRCS:.c=.o)
+LOCI_OBJS := $(LOCI_SRCS:.c=.o)
+
+CFLAGS := -Wall -Werror -g -Os
+CFLAGS += -Iinc -I../loci/inc -I ../loci/src
+
+all: locitest
+
+locitest: $(LOCITEST_OBJS) loci.a
+	$(CC) $^ -o $@
+
+loci.a: $(LOCI_OBJS)
+	ar rc $@ $^
+
+clean:
+	rm -f locitest loci.a $(LOCITEST_OBJS) $(LOCI_OBJS)
+
+# BSN build system magic
+MODULE := locitest
diff --git a/c_gen/templates/locitest/locitest_config.c b/c_gen/templates/locitest/locitest_config.c
index c7c6fa2..837010e 100644
--- a/c_gen/templates/locitest/locitest_config.c
+++ b/c_gen/templates/locitest/locitest_config.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2012, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /******************************************************************************
  *
diff --git a/c_gen/templates/locitest/locitest_enums.c b/c_gen/templates/locitest/locitest_enums.c
index 7c3f074..193f7cd 100644
--- a/c_gen/templates/locitest/locitest_enums.c
+++ b/c_gen/templates/locitest/locitest_enums.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2012, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /******************************************************************************
  *
diff --git a/c_gen/templates/locitest/locitest_int.h b/c_gen/templates/locitest/locitest_int.h
index b058604..b4d6c7f 100644
--- a/c_gen/templates/locitest/locitest_int.h
+++ b/c_gen/templates/locitest/locitest_int.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2012, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /******************************************************************************
  *
diff --git a/c_gen/templates/locitest/main.c b/c_gen/templates/locitest/main.c
new file mode 100644
index 0000000..86f7ae6
--- /dev/null
+++ b/c_gen/templates/locitest/main.c
@@ -0,0 +1,47 @@
+:: include('_copyright.c')
+
+/**
+ * @file test_main
+ *
+ * The main kickoff point for running all tests
+ */
+
+#include <locitest/unittest.h>
+#include <locitest/test_common.h>
+
+/* mcheck is a glibc extension */
+#if defined(__linux__)
+#include <mcheck.h>
+#define MCHECK_INIT mcheck(NULL)
+#else
+#define MCHECK_INIT do { } while (0)
+#endif
+
+int
+main(int argc, char *argv[])
+{
+    MCHECK_INIT;
+
+    RUN_TEST(ident_macros);
+
+    TEST_ASSERT(run_unified_accessor_tests() == TEST_PASS);
+    TEST_ASSERT(run_match_tests() == TEST_PASS);
+
+    TEST_ASSERT(run_utility_tests() == TEST_PASS);
+
+    /* These are deprecated by the unified accessor tests */
+    TEST_ASSERT(run_scalar_acc_tests() == TEST_PASS);
+    TEST_ASSERT(run_list_tests() == TEST_PASS);
+    TEST_ASSERT(run_message_tests() == TEST_PASS);
+    TEST_ASSERT(run_setup_from_add_tests() == TEST_PASS);
+
+    TEST_ASSERT(run_validator_tests() == TEST_PASS);
+
+    TEST_ASSERT(run_list_limits_tests() == TEST_PASS);
+
+    RUN_TEST(ext_objs);
+
+    TEST_ASSERT(test_datafiles() == TEST_PASS);
+
+    return global_error;
+}
diff --git a/c_gen/templates/locitest/test_data.c b/c_gen/templates/locitest/test_data.c
new file mode 100644
index 0000000..f4e985f
--- /dev/null
+++ b/c_gen/templates/locitest/test_data.c
@@ -0,0 +1,88 @@
+/* Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University */
+/* Copyright (c) 2011, 2012 Open Networking Foundation */
+/* Copyright (c) 2012, 2013 Big Switch Networks, Inc. */
+/* See the file LICENSE.loci which should have been included in the source distribution */
+
+/**
+ *
+ * AUTOMATICALLY GENERATED FILE.  Edits will be lost on regen.
+ *
+ * Data file tests for all versions.
+ */
+
+#include <locitest/test_common.h>
+
+<?py
+def hexarray(data, indent):
+    i = 0
+    text = []
+    text.append(" " * indent)
+    for byte in data:
+        text.append("0x%02x, " % ord(byte))
+        i += 1
+        if i == 8:
+            text.append("\n" + " " * indent)
+            i = 0
+        #endif
+    #endfor
+    return "".join(text)
+#end
+?>
+
+static void
+hexdump(const uint8_t *data, int len)
+{
+    int i = 0, j;
+    while (i < len) {
+	printf("%02x: ", i);
+	for (j = 0; j < 8 && i < len; j++, i++) {
+	    printf("%02x ", data[i]);
+	}
+	printf("\n");
+    }
+}
+
+static void
+show_failure(const uint8_t *a, int a_len, const uint8_t *b, int b_len)
+{
+    printf("\n--- Expected: (len=%d)\n", a_len);
+    hexdump(a, a_len);
+    printf("\n--- Actual: (len=%d)\n", b_len);
+    hexdump(b, b_len);
+}
+
+:: for test in tests:
+/* Generated from ${test['filename']} */
+static int
+test_${test['name']}(void) {
+    uint8_t binary[] = {
+${hexarray(test['binary'], indent=8)}
+    };
+
+    of_object_t *obj;
+
+${'\n'.join([' ' * 4 + x for x in test['c'].split("\n")])}
+
+    if (sizeof(binary) != WBUF_CURRENT_BYTES(OF_OBJECT_TO_WBUF(obj))
+        || memcmp(binary, WBUF_BUF(OF_OBJECT_TO_WBUF(obj)), sizeof(binary))) {
+	show_failure(binary, sizeof(binary),
+		     WBUF_BUF(OF_OBJECT_TO_WBUF(obj)),
+		     WBUF_CURRENT_BYTES(OF_OBJECT_TO_WBUF(obj)));
+	of_object_delete(obj);
+	return TEST_FAIL;
+    }
+
+    of_object_delete(obj);
+    return TEST_PASS;
+}
+
+:: #endfor
+
+int
+test_datafiles(void)
+{
+:: for test in tests:
+    RUN_TEST(${test['name']});
+:: #endfor
+    return TEST_PASS;
+}
diff --git a/c_gen/templates/locitest/test_ext.c b/c_gen/templates/locitest/test_ext.c
index 1efcaf6..87f9916 100644
--- a/c_gen/templates/locitest/test_ext.c
+++ b/c_gen/templates/locitest/test_ext.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2012, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /**
  * Test extensions
diff --git a/c_gen/templates/locitest/test_list_limits.c b/c_gen/templates/locitest/test_list_limits.c
index aef91d8..d8368ff 100644
--- a/c_gen/templates/locitest/test_list_limits.c
+++ b/c_gen/templates/locitest/test_list_limits.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /**
  * Test that list append fails gracefully when running out of wire buffer
diff --git a/c_gen/templates/locitest/test_match_utils.c b/c_gen/templates/locitest/test_match_utils.c
index 6e4a95e..8b116bf 100644
--- a/c_gen/templates/locitest/test_match_utils.c
+++ b/c_gen/templates/locitest/test_match_utils.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /**
  *
diff --git a/c_gen/templates/locitest/test_setup_from_add.c b/c_gen/templates/locitest/test_setup_from_add.c
index 6988572..0b07076 100644
--- a/c_gen/templates/locitest/test_setup_from_add.c
+++ b/c_gen/templates/locitest/test_setup_from_add.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /**
  * Test code for setup from flow add routines
@@ -33,10 +33,11 @@
 
 #include <locitest/test_common.h>
 
-#if !defined(__APPLE__)
+/* mcheck is a glibc extension */
+#if defined(__linux__)
 #include <mcheck.h>
 #define MCHECK_INIT mcheck(NULL)
-#else /* mcheck not available under OS X */
+#else
 #define MCHECK_INIT do { } while (0)
 #endif
 
diff --git a/c_gen/templates/locitest/test_utils.c b/c_gen/templates/locitest/test_utils.c
index adc6d42..bcd4194 100644
--- a/c_gen/templates/locitest/test_utils.c
+++ b/c_gen/templates/locitest/test_utils.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /**
  *
diff --git a/c_gen/templates/locitest/test_validator.c b/c_gen/templates/locitest/test_validator.c
index cac67f7..a4b55eb 100644
--- a/c_gen/templates/locitest/test_validator.c
+++ b/c_gen/templates/locitest/test_validator.c
@@ -25,7 +25,10 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
+:: import c_gen.of_g_legacy as of_g
+:: import c_gen.loxi_utils_legacy as loxi_utils
+:: from c_gen import type_maps
 
 /**
  * Test message validator
@@ -102,12 +105,45 @@
     return TEST_PASS;
 }
 
+/*
+ * Create an instance of every message and run it through the validator.
+ */
+static int
+test_validate_all(void)
+{
+::    for version in of_g.of_version_range:
+::        ver_name = loxi_utils.version_to_name(version)
+::
+::        for cls in reversed(of_g.standard_class_order):
+::            if not loxi_utils.class_in_version(cls, version):
+::                continue
+::            elif type_maps.class_is_virtual(cls):
+::                continue
+::            elif not loxi_utils.class_is_message(cls):
+::                continue
+::            #endif
+    {
+        ${cls}_t *obj = ${cls}_new(${ver_name});
+        of_message_t msg;
+        ${cls}_${ver_name}_populate(obj, 1);
+        msg = OF_OBJECT_TO_MESSAGE(obj);
+        TEST_ASSERT(of_validate_message(msg, of_message_length_get(msg)) == 0);
+        ${cls}_delete(obj);
+    }
+
+::        #endfor
+::    #endfor
+
+    return TEST_PASS;
+}
+
 int
 run_validator_tests(void)
 {
     RUN_TEST(validate_fixed_length);
     RUN_TEST(validate_fixed_length_list);
     RUN_TEST(validate_tlv16_list);
+    RUN_TEST(validate_all);
 
     return TEST_PASS;
 }
diff --git a/c_gen/templates/locitest/unittest.h b/c_gen/templates/locitest/unittest.h
index 7ffc413..c897d67 100644
--- a/c_gen/templates/locitest/unittest.h
+++ b/c_gen/templates/locitest/unittest.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 #ifndef UNITTEST_H
 #define UNITTEST_H
diff --git a/c_gen/templates/of_buffer.h b/c_gen/templates/of_buffer.h
index 05cc587..1788939 100644
--- a/c_gen/templates/of_buffer.h
+++ b/c_gen/templates/of_buffer.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /****************************************************************
  *
diff --git a/c_gen/templates/of_doc.h b/c_gen/templates/of_doc.h
index 9e352f0..d9bf64b 100644
--- a/c_gen/templates/of_doc.h
+++ b/c_gen/templates/of_doc.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /**
  * @file of_doc.h
diff --git a/c_gen/templates/of_message.h b/c_gen/templates/of_message.h
index 77066bc..df74231 100644
--- a/c_gen/templates/of_message.h
+++ b/c_gen/templates/of_message.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /*
  * These routines manipulate a low level buffer assuming it holds
@@ -45,17 +45,25 @@
 #define OF_MESSAGE_LENGTH_OFFSET 2
 #define OF_MESSAGE_XID_OFFSET 4
 #define OF_MESSAGE_HEADER_LENGTH 8
+#define OF_MESSAGE_ERROR_TYPE_OFFSET 8
 #define OF_MESSAGE_STATS_TYPE_OFFSET 8
 #define OF_MESSAGE_FLOW_MOD_COMMAND_OFFSET(version) ((version) == 1 ? 56 : 25)
+#define OF_MESSAGE_GROUP_MOD_COMMAND_OFFSET 8
 
 #define OF_MESSAGE_MIN_LENGTH 8
 #define OF_MESSAGE_MIN_STATS_LENGTH (OF_MESSAGE_STATS_TYPE_OFFSET + 2)
+#define OF_MESSAGE_MIN_ERROR_LENGTH (OF_MESSAGE_ERROR_TYPE_OFFSET + 4)
 #define OF_MESSAGE_MIN_FLOW_MOD_LENGTH(version)  ((version) == 1 ? 57 : 26)
+#define OF_MESSAGE_MIN_GROUP_MOD_LENGTH (OF_MESSAGE_GROUP_MOD_COMMAND_OFFSET + 2)
 
 #define OF_MESSAGE_EXPERIMENTER_ID_OFFSET 8
 #define OF_MESSAGE_EXPERIMENTER_SUBTYPE_OFFSET 12
 #define OF_MESSAGE_EXPERIMENTER_MIN_LENGTH 16
 
+#define OF_MESSAGE_STATS_EXPERIMENTER_ID_OFFSET 16
+#define OF_MESSAGE_STATS_EXPERIMENTER_SUBTYPE_OFFSET 20
+#define OF_MESSAGE_STATS_EXPERIMENTER_MIN_LENGTH 24
+
 /**
  * The "default" free message function; NULL means use nominal malloc/free
  */
@@ -97,11 +105,6 @@
     return (of_version_t)msg[OF_MESSAGE_VERSION_OFFSET];
 }
 
-static inline void
-of_message_version_set(of_message_t msg, of_version_t version) {
-    buf_u8_set(msg, (uint8_t)version);
-}
-
 /**
  * @brief Get/set OpenFlow type of a message
  * @param msg Pointer to the message buffer of sufficient length
@@ -114,11 +117,6 @@
     return msg[OF_MESSAGE_TYPE_OFFSET];
 }
 
-static inline void
-of_message_type_set(of_message_t msg, uint8_t value) {
-    buf_u8_set(msg + OF_MESSAGE_TYPE_OFFSET, value);
-}
-
 /**
  * @brief Get/set in-buffer length of a message
  * @param msg Pointer to the message buffer of sufficient length
@@ -153,11 +151,6 @@
     return val;
 }
 
-static inline void
-of_message_xid_set(of_message_t msg, uint32_t xid) {
-    buf_u32_set(msg + OF_MESSAGE_XID_OFFSET, xid);
-}
-
 /**
  * @brief Get/set stats type of a message
  * @param msg Pointer to the message buffer of sufficient length
@@ -172,9 +165,18 @@
     return val;
 }
 
-static inline void
-of_message_stats_type_set(of_message_t msg, uint16_t type) {
-    buf_u16_set(msg + OF_MESSAGE_STATS_TYPE_OFFSET, type);
+/**
+ * @brief Get/set error type of a message
+ * @param msg Pointer to the message buffer of sufficient length
+ * @param type Data for set operation
+ * @returns get returns error type in host order
+ */
+
+static inline uint16_t
+of_message_error_type_get(of_message_t msg) {
+    uint16_t val;
+    buf_u16_get(msg + OF_MESSAGE_ERROR_TYPE_OFFSET, &val);
+    return val;
 }
 
 
@@ -192,11 +194,6 @@
     return val;
 }
 
-static inline void
-of_message_experimenter_id_set(of_message_t msg, uint32_t experimenter_id) {
-    buf_u32_set(msg + OF_MESSAGE_EXPERIMENTER_ID_OFFSET, experimenter_id);
-}
-
 
 /**
  * @brief Get/set experimenter message type (subtype) of a message
@@ -212,13 +209,6 @@
     return val;
 }
 
-static inline void
-of_message_experimenter_subtype_set(of_message_t msg,
-                                    uint32_t subtype) {
-    buf_u32_set(msg + OF_MESSAGE_EXPERIMENTER_SUBTYPE_OFFSET,
-                subtype);
-}
-
 /**
  * Flow mod command changed from 16 to 8 bits on the wire from 1.0 to 1.1
  */
@@ -236,17 +226,46 @@
     return val8;
 }
 
-static inline void
-of_message_flow_mod_command_set(of_message_t msg, of_version_t version, 
-                                uint8_t command) {
-    uint16_t val16;
+/**
+ * @brief Get/set stats request/reply experimenter ID of a message
+ * @param msg Pointer to the message buffer of sufficient length
+ * @param experimenter_id Data for set operation
+ * @returns get returns experimenter id in host order
+ */
 
-    if (version == OF_VERSION_1_0) {
-        val16 = command;
-        buf_u16_set(msg + OF_MESSAGE_FLOW_MOD_COMMAND_OFFSET(version), val16);
-    } else {
-        buf_u8_set(msg + OF_MESSAGE_FLOW_MOD_COMMAND_OFFSET(version), command);
-    }
+static inline uint32_t
+of_message_stats_experimenter_id_get(of_message_t msg) {
+    uint32_t val;
+    buf_u32_get(msg + OF_MESSAGE_STATS_EXPERIMENTER_ID_OFFSET, &val);
+    return val;
+}
+
+/**
+ * @brief Get/set stats request/reply experimenter subtype of a message
+ * @param msg Pointer to the message buffer of sufficient length
+ * @param subtype Data for set operation
+ * @returns get returns experimenter subtype in host order
+ */
+
+static inline uint32_t
+of_message_stats_experimenter_subtype_get(of_message_t msg) {
+    uint32_t val;
+    buf_u32_get(msg + OF_MESSAGE_STATS_EXPERIMENTER_SUBTYPE_OFFSET, &val);
+    return val;
+}
+
+/**
+ * @brief Get/set group mod command of a message
+ * @param msg Pointer to the message buffer of sufficient length
+ * @param command Data for set operation
+ * @returns get returns command in host order
+ */
+
+static inline uint16_t
+of_message_group_mod_command_get(of_message_t msg) {
+    uint16_t val;
+    buf_u16_get(msg + OF_MESSAGE_GROUP_MOD_COMMAND_OFFSET, &val);
+    return val;
 }
 
 #endif /* _OF_MESSAGE_H_ */
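
A hedged sketch of using the new read-only accessors to classify an experimenter stats message; of_message_length_get is the existing length accessor referenced elsewhere in this diff, while the function name and the caller-supplied experimenter ID are illustrative:

/* Sketch: check there is room for the experimenter stats header before
 * reading it, then compare the experimenter ID. */
static int
example_is_experimenter_stats(of_message_t msg, uint32_t experimenter_id)
{
    if (of_message_length_get(msg) < OF_MESSAGE_STATS_EXPERIMENTER_MIN_LENGTH) {
        return 0;
    }
    return of_message_stats_experimenter_id_get(msg) == experimenter_id;
}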
diff --git a/c_gen/templates/of_object.c b/c_gen/templates/of_object.c
index a5cfecd..9b1bafd 100644
--- a/c_gen/templates/of_object.c
+++ b/c_gen/templates/of_object.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /****************************************************************
  *
@@ -39,17 +39,6 @@
 #include <loci/loci.h>
 #include <loci/loci_validator.h>
 
-#if defined(OF_OBJECT_TRACKING)
-#include <BigList/biglist.h>
-
-loci_object_track_t loci_global_tracking;
-
-#define TRACK (&loci_global_tracking)
-#define TRACK_OBJS (TRACK->objects)
-#define CHECK_MAX(val, max) if ((val) > (max)) (max) = (val)
-
-#endif
-
 /**
  * Create a generic new object and possibly underlying wire buffer
  * @param bytes The number of bytes to allocate in the underlying buffer
@@ -66,10 +55,10 @@
 {
     of_object_t *obj;
 
-    if ((obj = (of_object_t *)MALLOC(sizeof(of_generic_t))) == NULL) {
+    if ((obj = (of_object_t *)MALLOC(sizeof(*obj))) == NULL) {
         return NULL;
     }
-    MEMSET(obj, 0, sizeof(of_generic_t));
+    MEMSET(obj, 0, sizeof(*obj));
 
     if (bytes > 0) {
         if ((obj->wire_object.wbuf = of_wire_buffer_new(bytes)) == NULL) {
@@ -98,20 +87,6 @@
         return;
     }
 
-#if defined(OF_OBJECT_TRACKING)
-    ASSERT(obj->track_info.magic == OF_OBJECT_TRACKING_MAGIC &&
-           "of_object double free?");
-    LOCI_LOG_TRACE("OF obj delete %p.  Wire buf %p.\n", obj,
-                   obj->wire_object.wbuf);
-    ASSERT(TRACK->count_current > 0);
-    TRACK->count_current -= 1;
-    TRACK->deletes += 1;
-
-    TRACK_OBJS = biglist_remove_link_free(TRACK_OBJS,
-                                          obj->track_info.bl_entry);
-    obj->track_info.magic = 0;
-#endif
-
     /*
      * Make callback if present
      */
@@ -134,12 +109,12 @@
  */
 
 of_object_t *
-of_object_dup_(of_object_t *src)
+of_object_dup(of_object_t *src)
 {
     of_object_t *dst;
     of_object_init_f init_fn;
 
-    if ((dst = (of_object_t *)MALLOC(sizeof(of_generic_t))) == NULL) {
+    if ((dst = (of_object_t *)MALLOC(sizeof(*dst))) == NULL) {
         return NULL;
     }
 
@@ -163,107 +138,6 @@
     return dst;
 }
 
-#if defined(OF_OBJECT_TRACKING)
-
-/**
- * Record an object for tracking
- *
- * @param obj The object being tracked
- * @param file The file name where the allocation is happening
- * @param line The line number in the file where the alloc is happening
- */
-
-void
-of_object_track(of_object_t *obj, const char *file, int line)
-{
-    if (obj != NULL) {
-        LOCI_LOG_TRACE("OF obj track %p, wire buf %p\n%s:%d\\n",
-                      obj, obj->wire_object.wbuf, file, line);
-        obj->track_info.file = file;
-        obj->track_info.line = line;
-        TRACK_OBJS = biglist_prepend(TRACK_OBJS, (void *)obj);
-        obj->track_info.bl_entry = TRACK_OBJS;
-        obj->track_info.magic = OF_OBJECT_TRACKING_MAGIC;
-
-        TRACK->allocs += 1;
-        TRACK->count_current += 1;
-        CHECK_MAX(TRACK->count_current, TRACK->count_max);
-    }
-}
-
-/**
- * The dup function when tracking is enabled
- */
-
-of_object_t *
-of_object_dup_tracking(of_object_t *src, const char *file, int line)
-{
-    of_object_t *obj;
-
-    obj = of_object_dup_(src);
-    of_object_track(obj, file, line);
-
-    return obj;
-}
-
-/**
- * Display track info for one object
- */
-
-void
-of_object_track_output(of_object_t *obj, loci_writer_f writer, void* cookie)
-{
-    const char *offset;
-    static const char *unknown = "Unknown file";
-
-    if (obj->track_info.file) {
-        offset = strstr(obj->track_info.file, "Modules/");
-        if (offset == NULL) {
-            offset = obj->track_info.file;
-        } else {
-            offset += 8; /* Jump over Modules/ too */
-        }
-    } else {
-        offset = unknown;
-    }
-    writer(cookie, "obj %p. type %s.\n%s:%d\n",
-               obj, of_object_id_str[obj->object_id],
-               offset, obj->track_info.line);
-}
-
-/**
- * Dump out the current object list from LOCI
- *
- * @param log_fn The output printf vector
- *
- */
-
-void
-of_object_track_report(loci_writer_f writer, void* cookie)
-{
-    biglist_t *elt;
-    of_object_t *obj;
-    int count = 0;
-
-    writer(cookie, "\nLOCI Outstanding object list.\n");
-    writer(cookie, "Objs: Current %d. Max %d. Created %d. Deleted %d\n",
-               TRACK->count_current, TRACK->count_max, TRACK->allocs,
-               TRACK->deletes);
-    if (TRACK_OBJS) {
-        BIGLIST_FOREACH_DATA(elt, TRACK_OBJS, of_object_t *, obj) {
-            of_object_track_output(obj, writer, cookie);
-            ++count;
-        }
-    }
-    if (count != TRACK->count_current) {
-        writer(cookie, "\nERROR:  List has %d, but track count is %d\n",
-                   count, TRACK->count_current);
-    }
-    writer(cookie, "\nEnd of outstanding object list\n");
-}
-
-#endif
-
 /**
  * Generic new from message call
  */
@@ -302,11 +176,6 @@
     obj->length = len;
     obj->version = version;
 
-#if defined(OF_OBJECT_TRACKING)
-    /* @FIXME Would be nice to get caller; for now only in cxn_instance */
-    of_object_track(obj, __FILE__, __LINE__);
-#endif
-
     return obj;
 }
 
@@ -510,7 +379,7 @@
     }
 
     if (child->wire_type_set) {
-        child->wire_type_set(child, child->object_id);
+        child->wire_type_set(child);
     }
 
     /* Update the parent's length */
@@ -641,6 +510,90 @@
     obj->wire_object.wbuf = NULL;
 }
 
+#define _MAX_PARENT_ITERATIONS 4
+/**
+ * Iteratively update parent lengths thru hierarchy
+ * @param obj The object whose length is being updated
+ * @param delta The difference between the current and new lengths
+ *
+ * Note that this includes updating the object itself.  It will
+ * iterate thru parents.
+ *
+ * Assumes delta > 0.
+ */
+void
+of_object_parent_length_update(of_object_t *obj, int delta)
+{
+#ifndef NDEBUG
+    int count = 0;
+    of_wire_buffer_t *wbuf;  /* For debug asserts only */
+#endif
+
+    while (obj != NULL) {
+        ASSERT(count++ < _MAX_PARENT_ITERATIONS);
+        obj->length += delta;
+        if (obj->wire_length_set != NULL) {
+            obj->wire_length_set(obj, obj->length);
+        }
+#ifndef NDEBUG
+        wbuf = obj->wire_object.wbuf;
+#endif
+
+        /* Asserts for wire length checking */
+        ASSERT(obj->length + obj->wire_object.obj_offset <=
+               WBUF_CURRENT_BYTES(wbuf));
+        if (obj->parent == NULL) {
+            ASSERT(obj->length + obj->wire_object.obj_offset ==
+                   WBUF_CURRENT_BYTES(wbuf));
+        }
+
+        obj = obj->parent;
+    }
+}
+
+/**
+ * Use the type/length from the wire buffer and init the object
+ * @param obj The object being initialized
+ * @param base_object_id If > 0, this indicates the base object
+ * type for inheritance checking
+ * @param max_len If > 0, the max length to expect for the obj
+ * @return OF_ERROR_
+ *
+ * Used for inheritance type objects such as actions and OXMs
+ * The type is checked and if valid, the object is initialized.
+ * Then the length is taken from the buffer.
+ *
+ * Note that the object version must already be properly set.
+ */
+int
+of_object_wire_init(of_object_t *obj, of_object_id_t base_object_id,
+                    int max_len)
+{
+    if (obj->wire_type_get != NULL) {
+        of_object_id_t id;
+        obj->wire_type_get(obj, &id);
+        if (!of_wire_id_valid(id, base_object_id)) {
+            return OF_ERROR_PARSE;
+        }
+        obj->object_id = id;
+        /* Call the init function for this object type; do not push to wire */
+        of_object_init_map[id]((of_object_t *)(obj), obj->version, -1, 0);
+    }
+    if (obj->wire_length_get != NULL) {
+        int length;
+        obj->wire_length_get(obj, &length);
+        if (length < 0 || (max_len > 0 && length > max_len)) {
+            return OF_ERROR_PARSE;
+        }
+        obj->length = length;
+    } else {
+        /* @fixme Does this cover everything else? */
+        obj->length = of_object_fixed_len[obj->version][base_object_id];
+    }
+
+    return OF_ERROR_NONE;
+}
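
A hedged usage sketch for inheritance-style objects such as actions or OXMs; OF_ACTION as the virtual base object ID and the remaining-length argument are assumptions for illustration:

/* Sketch: resolve the concrete type of a generic action bound to the wire.
 * On success the object_id is coerced via of_object_init_map and the length
 * is taken from the wire; OF_ERROR_PARSE comes back on a bad type or length. */
static int
example_resolve_action(of_object_t *action, int bytes_remaining)
{
    return of_object_wire_init(action, OF_ACTION, bytes_remaining);
}

A list iterator would then typically advance by the recovered action->length before resolving the next element.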
+
 /*
  * Set member:
  *    get_wbuf_extent
diff --git a/c_gen/templates/of_object.h b/c_gen/templates/of_object.h
index cb9342e..0e761fd 100644
--- a/c_gen/templates/of_object.h
+++ b/c_gen/templates/of_object.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /*
  * @fixme THIS FILE NEEDS CLEANUP.  It may just go away.
@@ -46,10 +46,7 @@
 #include <loci/of_match.h>
 #include <loci/loci_base.h>
 #include <loci/of_message.h>
-
-#if defined(OF_OBJECT_TRACKING)
-#include <BigList/biglist.h>
-#endif
+#include <loci/of_wire_buf.h>
 
 /**
  * This is the number of bytes reserved for metadata in each
@@ -57,6 +54,24 @@
  */
 #define OF_OBJECT_METADATA_BYTES 32
 
+/*
+ * Generic accessors:
+ *
+ * Many objects have a length represented in the wire buffer.
+ * wire_length_get and wire_length_set access these values directly on the
+ * wire.
+ *
+ * Many objects also have a type represented in the wire buffer.
+ * wire_type_get and wire_type_set access these values directly on the
+ * wire.
+ *
+ * FIXME: TBD if wire_length_set and wire_type_set are required.
+ */
+typedef void (*of_wire_length_get_f)(of_object_t *obj, int *bytes);
+typedef void (*of_wire_length_set_f)(of_object_t *obj, int bytes);
+typedef void (*of_wire_type_get_f)(of_object_t *obj, of_object_id_t *id);
+typedef void (*of_wire_type_set_f)(of_object_t *obj);
+
 /****************************************************************
  * General list operations: first, next, append_setup, append_advance
  ****************************************************************/
@@ -74,79 +89,18 @@
 extern int of_list_append(of_object_t *list, of_object_t *item);
 
 extern of_object_t *of_object_new(int bytes);
-extern of_object_t * of_object_dup_(of_object_t *src);
+extern of_object_t *of_object_dup(of_object_t *src);
 
 /**
  * Callback function prototype for deleting an object
  */
 typedef void (*of_object_delete_callback_f)(of_object_t *obj);
 
-#if defined(OF_OBJECT_TRACKING)
-/**
- * When tracking is enabled, the location of each new or dup
- * call of an OF object is recorded and a list is kept of all
- * outstanding objects.
- *
- * This dovetails with using objects to track outstanding operations
- * for barrier processing.
- */
-
-/**
- * Global tracking stats
- */
-typedef struct loci_object_track_s {
-    biglist_t *objects;
-    int count_current;
-    uint32_t count_max;
-    uint32_t allocs;
-    uint32_t deletes;
-} loci_object_track_t;
-
-extern loci_object_track_t loci_global_tracking;
-
-/* Remap dup call to tracking */
-extern of_object_t * of_object_dup_tracking(of_object_t *src,
-                                            const char *file, int line);
-#define of_object_dup(src) of_object_dup_tracking(src, __FILE__, __LINE__)
-extern void of_object_track(of_object_t *obj, const char *file, int line);
-
-extern void of_object_track_output(of_object_t *obj, loci_writer_f writer, void* cookie); 
-extern void of_object_track_report(loci_writer_f writer, void* cookie); 
-
-/**
- * The data stored in each object related to tracking and
- * The LOCI client may install a delete callback function to allow
- * the notification of an object's destruction.
- */
-
-typedef struct of_object_track_info_s {
-    of_object_delete_callback_f delete_cb;  /* To be implemented */
-    void *delete_cookie;
-
-    /* Track file and line where allocated */
-    const char *file;
-    int line;
-    biglist_t *bl_entry; /* Pointer to self */
-    uint32_t magic; /* validation value */
-} of_object_track_info_t;
-
-#define OF_OBJECT_TRACKING_MAGIC 0x11235813
-#else
-
-/* Use native dup call */
-#define of_object_dup of_object_dup_
-
-/**
- * When tracking is not enabled, we still support a delete callback
- */
-
 typedef struct of_object_track_info_s {
     of_object_delete_callback_f delete_cb;  /* To be implemented */
     void *delete_cookie;
 } of_object_track_info_t;
 
-#endif
-
 extern int of_object_xid_set(of_object_t *obj, uint32_t xid);
 extern int of_object_xid_get(of_object_t *obj, uint32_t *xid);
 
@@ -173,4 +127,46 @@
 
 int of_object_can_grow(of_object_t *obj, int new_len);
 
+void of_object_parent_length_update(of_object_t *obj, int delta);
+
+struct of_object_s {
+    /* The control block for the underlying data buffer */
+    of_wire_object_t wire_object;
+    /* The LOCI type enum value of the object */
+    of_object_id_t object_id;
+
+    /*
+     * Objects need to track their "parent" so that updates to the
+     * object that affect its length can be pushed to the parent.
+     * Treat as private.
+     */
+    of_object_t *parent;
+
+    /*
+     * Not all objects have length and version on the wire so we keep
+     * them here.  NOTE: Infrastructure manages length and version.
+     * Treat length as private and version as read only.
+     */
+    int length;
+    of_version_t version;
+
+    /*
+     * Many objects have a length and/or type represented in the wire buffer
+     * These accessors get and set those value when present.  Treat as private.
+     */
+    of_wire_length_get_f wire_length_get;
+    of_wire_length_set_f wire_length_set;
+    of_wire_type_get_f wire_type_get;
+    of_wire_type_set_f wire_type_set;
+
+    of_object_track_info_t track_info;
+
+    /*
+     * Metadata available for applications.  Ensure 8-byte alignment, but
+     * that buffer is at least as large as requested.  This data is not used
+     * or inspected by LOCI.
+     */
+    uint64_t metadata[(OF_OBJECT_METADATA_BYTES + 7) / 8];
+};
+
 #endif /* _OF_OBJECT_H_ */
diff --git a/c_gen/templates/of_type_maps.c b/c_gen/templates/of_type_maps.c
index f1fab57..9bc3d5c 100644
--- a/c_gen/templates/of_type_maps.c
+++ b/c_gen/templates/of_type_maps.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /****************************************************************
  *
@@ -37,6 +37,11 @@
 #include <loci/loci.h>
 #include <loci/of_message.h>
 
+#define OF_INSTRUCTION_EXPERIMENTER_ID_OFFSET 4
+#define OF_INSTRUCTION_EXPERIMENTER_SUBTYPE_OFFSET 8
+
+${legacy_code}
+
 /****************************************************************
  * Top level OpenFlow message length functions
  ****************************************************************/
@@ -136,30 +141,6 @@
 }
 
 /**
- * Set the object ID based on the wire buffer for any TLV object
- * @param obj The object being referenced
- * @param id The ID value representing what should be stored.
- */
-
-void
-of_tlv16_wire_object_id_set(of_object_t *obj, of_object_id_t id)
-{
-    int wire_type;
-    of_wire_buffer_t *wbuf = OF_OBJECT_TO_WBUF(obj);
-    ASSERT(wbuf != NULL);
-
-    wire_type = of_object_to_type_map[obj->version][id];
-    ASSERT(wire_type >= 0);
-
-    of_wire_buffer_u16_set(wbuf, 
-        OF_OBJECT_ABSOLUTE_OFFSET(obj, TLV16_WIRE_TYPE_OFFSET), wire_type);
-
-    if (wire_type == OF_EXPERIMENTER_TYPE) {
-        of_extension_object_id_set(obj, id);
-    }
-}
-
-/**
  * Get the object ID of an extended action
  * @param obj The object being referenced
  * @param id Where to store the object ID
@@ -206,41 +187,6 @@
 }
 
 /**
- * Set wire data for extension objects, not messages.
- *
- * Currently only handles BSN mirror; ignores all others
- */
-
-void
-of_extension_object_id_set(of_object_t *obj, of_object_id_t id)
-{
-    uint8_t *buf = OF_OBJECT_BUFFER_INDEX(obj, 0);
-    
-    switch (id) {
-    case OF_ACTION_BSN_MIRROR:
-    case OF_ACTION_ID_BSN_MIRROR:
-        buf_u32_set(buf + OF_ACTION_EXPERIMENTER_ID_OFFSET,
-                    OF_EXPERIMENTER_ID_BSN);
-        buf_u32_set(buf + OF_ACTION_EXPERIMENTER_SUBTYPE_OFFSET, 1);
-        break;
-    case OF_ACTION_BSN_SET_TUNNEL_DST:
-    case OF_ACTION_ID_BSN_SET_TUNNEL_DST:
-        buf_u32_set(buf + OF_ACTION_EXPERIMENTER_ID_OFFSET,
-                    OF_EXPERIMENTER_ID_BSN);
-        buf_u32_set(buf + OF_ACTION_EXPERIMENTER_SUBTYPE_OFFSET, 2);
-        break;
-    case OF_ACTION_NICIRA_DEC_TTL:
-    case OF_ACTION_ID_NICIRA_DEC_TTL:
-        buf_u32_set(buf + OF_ACTION_EXPERIMENTER_ID_OFFSET,
-                    OF_EXPERIMENTER_ID_NICIRA);
-        buf_u16_set(buf + OF_ACTION_EXPERIMENTER_SUBTYPE_OFFSET, 18);
-        break;
-    default:
-        break;
-    }
-}
-
-/**
  * Get the object ID of an extended action
  * @param obj The object being referenced
  * @param id Where to store the object ID
@@ -339,10 +285,28 @@
 static int
 extension_instruction_object_id_get(of_object_t *obj, of_object_id_t *id)
 {
-    (void)obj;
+    uint32_t exp_id;
+    uint8_t *buf;
 
     *id = OF_INSTRUCTION_EXPERIMENTER;
 
+    buf = OF_OBJECT_BUFFER_INDEX(obj, 0);
+
+    buf_u32_get(buf + OF_INSTRUCTION_EXPERIMENTER_ID_OFFSET, &exp_id);
+
+    switch (exp_id) {
+    case OF_EXPERIMENTER_ID_BSN: {
+        uint32_t subtype;
+        buf_u32_get(buf + OF_INSTRUCTION_EXPERIMENTER_SUBTYPE_OFFSET, &subtype);
+        switch (subtype) {
+        case 0: *id = OF_INSTRUCTION_BSN_DISABLE_SRC_MAC_CHECK; break;
+        case 1: *id = OF_INSTRUCTION_BSN_ARP_OFFLOAD; break;
+        case 2: *id = OF_INSTRUCTION_BSN_DHCP_OFFLOAD; break;
+        }
+        break;
+    }
+    }
+
     return OF_ERROR_NONE;
 }
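The decode above keys off the experimenter ID at byte offset 4 and, for BSN, a subtype at offset 8, both in network byte order. A hedged, self-contained Python sketch of the same header parse; the subtype table mirrors the switch above and the function name is illustrative:

    import struct

    BSN_INSTRUCTION_SUBTYPES = {
        0: "of_instruction_bsn_disable_src_mac_check",
        1: "of_instruction_bsn_arp_offload",
        2: "of_instruction_bsn_dhcp_offload",
    }

    def decode_experimenter_instruction(buf, bsn_experimenter_id):
        # u16 type, u16 len, u32 experimenter, u32 subtype
        _type, _len, exp_id, subtype = struct.unpack_from("!HHII", buf, 0)
        if exp_id == bsn_experimenter_id:
            return BSN_INSTRUCTION_SUBTYPES.get(subtype,
                                                "of_instruction_experimenter")
        return "of_instruction_experimenter"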
 
@@ -484,6 +448,23 @@
     ASSERT(*id != OF_OBJECT_INVALID);
 }
 
+/**
+ * Get the object ID based on the wire buffer for a bsn_tlv object
+ * @param obj The object being referenced
+ * @param id Where to store the object ID
+ */
+
+void
+of_bsn_tlv_wire_object_id_get(of_object_t *obj, of_object_id_t *id)
+{
+    int wire_type;
+
+    of_tlv16_wire_type_get(obj, &wire_type);
+    ASSERT(wire_type >= 0 && wire_type < OF_BSN_TLV_ITEM_COUNT);
+    *id = of_bsn_tlv_type_to_id[obj->version][wire_type];
+    ASSERT(*id != OF_OBJECT_INVALID);
+}
+
 /****************************************************************
  * OXM type/length functions.
  ****************************************************************/
@@ -529,27 +510,6 @@
 }
 
 /**
- * Set the length of an OXM object in the wire buffer
- * @param obj The object whose wire buffer is an OXM type
- * @param bytes Value to store in wire buffer
- */
-
-void
-of_oxm_wire_length_set(of_object_t *obj, int bytes)
-{
-    uint32_t type_len;
-    of_wire_buffer_t *wbuf;
-
-    ASSERT(bytes >= 0 && bytes < 256);
-
-    /* Read-modify-write */
-    _GET_OXM_TYPE_LEN(obj, &type_len, wbuf);
-    OF_OXM_LENGTH_SET(type_len, bytes);
-    of_wire_buffer_u32_set(wbuf, 
-           OF_OBJECT_ABSOLUTE_OFFSET(obj, OXM_HDR_OFFSET), type_len);
-}
-
-/**
  * Get the object ID of an OXM object based on the wire buffer type
  * @param obj The object whose wire buffer is an OXM type
  * @param id (out) Where the ID is stored 
@@ -559,40 +519,12 @@
 of_oxm_wire_object_id_get(of_object_t *obj, of_object_id_t *id)
 {
     uint32_t type_len;
-    int wire_type;
     of_wire_buffer_t *wbuf;
 
     _GET_OXM_TYPE_LEN(obj, &type_len, wbuf);
-    wire_type = OF_OXM_MASKED_TYPE_GET(type_len);
-    *id = of_oxm_to_object_id(wire_type, obj->version);
+    *id = of_oxm_to_object_id(type_len, obj->version);
 }
 
-/**
- * Set the wire type of an OXM object based on the object ID passed
- * @param obj The object whose wire buffer is an OXM type
- * @param id The object ID mapped to an OXM wire type which is stored
- */
-
-void
-of_oxm_wire_object_id_set(of_object_t *obj, of_object_id_t id)
-{
-    uint32_t type_len;
-    int wire_type;
-    of_wire_buffer_t *wbuf;
-
-    ASSERT(OF_OXM_VALID_ID(id));
-
-    /* Read-modify-write */
-    _GET_OXM_TYPE_LEN(obj, &type_len, wbuf);
-    wire_type = of_object_to_wire_type(id, obj->version);
-    ASSERT(wire_type >= 0);
-    OF_OXM_MASKED_TYPE_SET(type_len, wire_type);
-    of_wire_buffer_u32_set(wbuf, 
-           OF_OBJECT_ABSOLUTE_OFFSET(obj, OXM_HDR_OFFSET), type_len);
-}
-
-
-
 #define OF_U16_LEN_LENGTH_OFFSET 0
 
 /**
@@ -794,3 +726,47 @@
 
     return OF_ERROR_NONE;
 }
+
+int
+of_experimenter_stats_request_to_object_id(uint32_t experimenter, uint32_t subtype, int ver)
+{
+    switch (experimenter) {
+    case OF_EXPERIMENTER_ID_BSN:
+        switch (subtype) {
+        case 1: return OF_BSN_LACP_STATS_REQUEST;
+        case 2: return OF_BSN_GENTABLE_ENTRY_DESC_STATS_REQUEST;
+        case 3: return OF_BSN_GENTABLE_ENTRY_STATS_REQUEST;
+        case 4: return OF_BSN_GENTABLE_DESC_STATS_REQUEST;
+        case 5: return OF_BSN_GENTABLE_BUCKET_STATS_REQUEST;
+        case 6: return OF_BSN_SWITCH_PIPELINE_STATS_REQUEST;
+        case 7: return OF_BSN_GENTABLE_STATS_REQUEST;
+        case 8: return OF_BSN_PORT_COUNTER_STATS_REQUEST;
+        case 9: return OF_BSN_VLAN_COUNTER_STATS_REQUEST;
+        case 10: return OF_BSN_FLOW_CHECKSUM_BUCKET_STATS_REQUEST;
+        case 11: return OF_BSN_TABLE_CHECKSUM_STATS_REQUEST;
+        }
+    }
+    return OF_OBJECT_INVALID;
+}
+
+int
+of_experimenter_stats_reply_to_object_id(uint32_t experimenter, uint32_t subtype, int ver)
+{
+    switch (experimenter) {
+    case OF_EXPERIMENTER_ID_BSN:
+        switch (subtype) {
+        case 1: return OF_BSN_LACP_STATS_REPLY;
+        case 2: return OF_BSN_GENTABLE_ENTRY_DESC_STATS_REPLY;
+        case 3: return OF_BSN_GENTABLE_ENTRY_STATS_REPLY;
+        case 4: return OF_BSN_GENTABLE_DESC_STATS_REPLY;
+        case 5: return OF_BSN_GENTABLE_BUCKET_STATS_REPLY;
+        case 6: return OF_BSN_SWITCH_PIPELINE_STATS_REPLY;
+        case 7: return OF_BSN_GENTABLE_STATS_REPLY;
+        case 8: return OF_BSN_PORT_COUNTER_STATS_REPLY;
+        case 9: return OF_BSN_VLAN_COUNTER_STATS_REPLY;
+        case 10: return OF_BSN_FLOW_CHECKSUM_BUCKET_STATS_REPLY;
+        case 11: return OF_BSN_TABLE_CHECKSUM_STATS_REPLY;
+        }
+    }
+    return OF_OBJECT_INVALID;
+}
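Both helpers above are plain experimenter/subtype switches. For cross-checking (for example in tests), the request-side BSN table can be mirrored in Python; the entries below are copied from the switch above and the remaining subtypes follow the same pattern:

    # Partial mirror of of_experimenter_stats_request_to_object_id() for BSN.
    BSN_STATS_REQUEST_SUBTYPES = {
        1: "of_bsn_lacp_stats_request",
        2: "of_bsn_gentable_entry_desc_stats_request",
        3: "of_bsn_gentable_entry_stats_request",
        # subtypes 4-11 continue as in the C switch above
    }

    def bsn_stats_request_object(subtype):
        return BSN_STATS_REQUEST_SUBTYPES.get(subtype)  # None if unknown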
diff --git a/c_gen/templates/of_utils.c b/c_gen/templates/of_utils.c
index f9c1972..ba22536 100644
--- a/c_gen/templates/of_utils.c
+++ b/c_gen/templates/of_utils.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /****************************************************************
  * File: of_utils.h
diff --git a/c_gen/templates/of_utils.h b/c_gen/templates/of_utils.h
index 1a3a29a..5510d67 100644
--- a/c_gen/templates/of_utils.h
+++ b/c_gen/templates/of_utils.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /****************************************************************
  * File: of_utils.h
diff --git a/c_gen/templates/of_wire_buf.c b/c_gen/templates/of_wire_buf.c
index acfcb84..13da8e3 100644
--- a/c_gen/templates/of_wire_buf.c
+++ b/c_gen/templates/of_wire_buf.c
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 /****************************************************************
  *
@@ -138,11 +138,7 @@
     /* Doesn't make sense; mismatch in current buffer info */
     ASSERT(old_len + offset <= wbuf->current_bytes);
 
-    if (old_len < new_len) {
-        of_wire_buffer_grow(wbuf, offset + new_len);
-    } else {
-        wbuf->current_bytes += (new_len - old_len); // may decrease size
-    }
+    wbuf->current_bytes += (new_len - old_len); // may decrease size
 
     if ((old_len + offset < cur_bytes) && (old_len != new_len)) {
         /* Need to move back of buffer */
diff --git a/c_gen/templates/of_wire_buf.h b/c_gen/templates/of_wire_buf.h
index cf1ee17..f977004 100644
--- a/c_gen/templates/of_wire_buf.h
+++ b/c_gen/templates/of_wire_buf.h
@@ -25,7 +25,7 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-/* Copyright 2013, Big Switch Networks, Inc. */
+:: include('_copyright.c')
 
 #if !defined(_OF_WIRE_BUF_H_)
 #define _OF_WIRE_BUF_H_
@@ -348,6 +348,38 @@
     buf_u32_set(OF_WIRE_BUFFER_INDEX(wbuf, offset), value);
 }
 
+
+/**
+ * Get a uint32_t scalar from a wire buffer
+ * @param wbuf The pointer to the wire buffer structure
+ * @param offset Offset in the wire buffer
+ * @param value Pointer to where to put value
+ *
+ * The underlying buffer accessor funtions handle endian and alignment.
+ */
+
+static inline void
+of_wire_buffer_ipv4_get(of_wire_buffer_t *wbuf, int offset, of_ipv4_t *value)
+{
+    of_wire_buffer_u32_get(wbuf, offset, value);
+}
+
+/**
+ * Set an ipv4 (uint32_t) scalar in a wire buffer
+ * @param wbuf The pointer to the wire buffer structure
+ * @param offset Offset in the wire buffer
+ * @param value The value to store
+ *
+ * The underlying buffer accessor functions handle endianness and alignment.
+ */
+
+static inline void
+of_wire_buffer_ipv4_set(of_wire_buffer_t *wbuf, int offset, of_ipv4_t value)
+{
+    of_wire_buffer_u32_set(wbuf, offset, value);
+}
+
+
 /**
  * Get a uint64_t scalar from a wire buffer
  * @param wbuf The pointer to the wire buffer structure
@@ -844,6 +876,50 @@
 #define of_wire_buffer_ipv6_set(buf, offset, addr) \
     _wbuf_octets_set(buf, offset, (uint8_t *)&addr, sizeof(of_ipv6_t))
 
+/**
+ * Get a bitmap_128 value from a wire buffer
+ * @param wbuf The pointer to the wire buffer structure
+ * @param offset Offset in the wire buffer
+ * @param addr Pointer to where to store the bitmap_128 value
+ *
+ * Implemented with the u64 accessors on the hi and lo words.
+ */
+
+#define of_wire_buffer_bitmap_128_get(buf, offset, addr) \
+    (of_wire_buffer_u64_get(buf, offset, &addr->hi), of_wire_buffer_u64_get(buf, offset+8, &addr->lo))
+
+/**
+ * Set a bitmap_128 value in a wire buffer
+ * @param wbuf The pointer to the wire buffer structure
+ * @param offset Offset in the wire buffer
+ * @param addr The variable holding the bitmap_128 value to store
+ *
+ * Implemented with the u64 accessors on the hi and lo words.
+ */
+
+#define of_wire_buffer_bitmap_128_set(buf, offset, addr) \
+    (of_wire_buffer_u64_set(buf, offset, addr.hi), of_wire_buffer_u64_set(buf, offset+8, addr.lo))
+
+/**
+ * Get a checksum_128 from a wire buffer
+ * @param wbuf The pointer to the wire buffer structure
+ * @param offset Offset in the wire buffer
+ * @param checksum Pointer to where to store the checksum_128
+ */
+
+#define of_wire_buffer_checksum_128_get(buf, offset, checksum) \
+    (of_wire_buffer_u64_get(buf, offset, &checksum->hi), of_wire_buffer_u64_get(buf, offset+8, &checksum->lo))
+
+/**
+ * Set a checksum_128 in a wire buffer
+ * @param wbuf The pointer to the wire buffer structure
+ * @param offset Offset in the wire buffer
+ * @param checksum The variable holding checksum_128 to store
+ */
+
+#define of_wire_buffer_checksum_128_set(buf, offset, checksum) \
+    (of_wire_buffer_u64_set(buf, offset, checksum.hi), of_wire_buffer_u64_set(buf, offset+8, checksum.lo))
+
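The bitmap_128 and checksum_128 helpers store a 128-bit value as two u64 words, hi word first, via the u64 accessors. A small self-contained Python sketch of that split, assuming network byte order on the wire:

    import struct

    def put_u128(buf, offset, hi, lo):
        # hi word at offset, lo word at offset + 8, both big-endian
        struct.pack_into("!QQ", buf, offset, hi, lo)

    def get_u128(buf, offset):
        return struct.unpack_from("!QQ", buf, offset)

    buf = bytearray(16)
    put_u128(buf, 0, 0x0123456789abcdef, 0xfedcba9876543210)
    assert get_u128(buf, 0) == (0x0123456789abcdef, 0xfedcba9876543210)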
 /* Relocate data from start offset to the end of the buffer to a new position */
 static inline void
 of_wire_buffer_move_end(of_wire_buffer_t *wbuf, int start_offset, int new_offset)
diff --git a/loxi_front_end/translation.py b/c_gen/translation.py
similarity index 97%
rename from loxi_front_end/translation.py
rename to c_gen/translation.py
index 9ffc210..fb64fa9 100644
--- a/loxi_front_end/translation.py
+++ b/c_gen/translation.py
@@ -31,7 +31,7 @@
 
 import re
 import sys
-        
+
 def loxi_name(ident):
     """
     Return the LOXI name of an openflow.h identifier
@@ -89,7 +89,7 @@
         dict(OFPM_ = "OF_METER_"),
         dict(OFPXMC_ = "OF_OXM_CLASS_"),
         dict(OFPVID_ = "OF_VLAN_TAG_"),
-        dict(OFPGC_ = "OF_GROUP_"),
+        dict(OFPGC_ = "OF_GROUP_MOD_COMMAND_"),
         dict(OFPGT_ = "OF_GROUP_TYPE_"),
         dict(OFPG_ = "OF_GROUP_"),
         dict(OFPET_ = "OF_ERROR_TYPE_"),
@@ -116,6 +116,7 @@
         dict(OFPMP_ = "OF_MULTIPART_"),
         dict(OFPMPF_ = "OF_MULTIPART_FLAG_"),
         dict(OFPTFPT_ = "OF_TABLE_FEATURE_"),
+        dict(OFPHET_ = "OF_HELLO_ELEM_TYPE_"),
         dict(NX_ROLE_ = "OF_NICIRA_CONTROLLER_ROLE_"),
         ]
 
@@ -123,5 +124,4 @@
         for id_from, id_to in entry.items():
             if re.match(id_from, ident):
                 return re.sub(id_from, id_to, ident)
-    return None
-
+    return ident
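With this change loxi_name() returns the identifier unchanged when no prefix matches, instead of None. A short usage sketch, assuming the module is importable as c_gen.translation from a loxigen checkout:

    from c_gen.translation import loxi_name

    print(loxi_name("OFPGC_ADD"))        # OF_GROUP_MOD_COMMAND_ADD
    print(loxi_name("OFPGT_ALL"))        # OF_GROUP_TYPE_ALL
    print(loxi_name("NOT_AN_OF_NAME"))   # returned unchanged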
diff --git a/loxi_front_end/type_maps.py b/c_gen/type_maps.py
similarity index 60%
rename from loxi_front_end/type_maps.py
rename to c_gen/type_maps.py
index df002c4..875a34e 100644
--- a/loxi_front_end/type_maps.py
+++ b/c_gen/type_maps.py
@@ -33,11 +33,12 @@
 # to wire value.
 #
 
-import of_g
+import c_gen.of_g_legacy as of_g
 import sys
 from generic_utils import *
-import oxm
 import loxi_utils.loxi_utils as loxi_utils
+import c_gen.loxi_utils_legacy as loxi_utils
+import loxi_globals
 
 invalid_type = "invalid_type"
 invalid_value = "0xeeee"  # Note, as a string
@@ -54,311 +55,119 @@
 ################################################################
 
 instruction_types = {
-    # version 1.0
-    of_g.VERSION_1_0:dict(),
-
-    # version 1.1
-    of_g.VERSION_1_1:dict(
-        goto_table = 1,
-        write_metadata = 2,
-        write_actions = 3,
-        apply_actions = 4,
-        clear_actions = 5,
-        experimenter = 0xffff
-        ),
-
-    # version 1.2
-    of_g.VERSION_1_2:dict(
-        goto_table = 1,
-        write_metadata = 2,
-        write_actions = 3,
-        apply_actions = 4,
-        clear_actions = 5,
-        experimenter = 0xffff
-        ),
-
-    # version 1.3
-    of_g.VERSION_1_3:dict(
-        goto_table = 1,
-        write_metadata = 2,
-        write_actions = 3,
-        apply_actions = 4,
-        clear_actions = 5,
-        meter = 6,
-        experimenter = 0xffff
-        )
-    }
-
-of_1_3_action_types = dict(
-    output       = 0,
-    copy_ttl_out = 11,
-    copy_ttl_in  = 12,
-    set_mpls_ttl = 15,
-    dec_mpls_ttl = 16,
-    push_vlan    = 17,
-    pop_vlan     = 18,
-    push_mpls    = 19,
-    pop_mpls     = 20,
-    set_queue    = 21,
-    group        = 22,
-    set_nw_ttl   = 23,
-    dec_nw_ttl   = 24,
-    set_field    = 25,
-    push_pbb     = 26,
-    pop_pbb      = 27,
-    experimenter = 0xffff,
-    bsn_mirror = 0xffff,
-    bsn_set_tunnel_dst = 0xffff,
-    nicira_dec_ttl = 0xffff
-    )
-
-# Indexed by OF version
-action_types = {
-    # version 1.0
-    of_g.VERSION_1_0:dict(
-        output = 0,
-        set_vlan_vid = 1,
-        set_vlan_pcp = 2,
-        strip_vlan = 3,
-        set_dl_src = 4,
-        set_dl_dst = 5,
-        set_nw_src = 6,
-        set_nw_dst = 7,
-        set_nw_tos = 8,
-        set_tp_src = 9,
-        set_tp_dst = 10,
-        enqueue = 11,
-        experimenter = 0xffff,
-        bsn_mirror = 0xffff,
-        bsn_set_tunnel_dst = 0xffff,
-        nicira_dec_ttl = 0xffff
-        ),
-
-    # version 1.1
-    of_g.VERSION_1_1:dict(
-        output = 0,
-        set_vlan_vid = 1,
-        set_vlan_pcp = 2,
-        set_dl_src = 3,
-        set_dl_dst = 4,
-        set_nw_src = 5,
-        set_nw_dst = 6,
-        set_nw_tos = 7,
-        set_nw_ecn = 8,
-        set_tp_src = 9,
-        set_tp_dst = 10,
-        copy_ttl_out = 11,
-        copy_ttl_in = 12,
-        set_mpls_label = 13,
-        set_mpls_tc = 14,
-        set_mpls_ttl = 15,
-        dec_mpls_ttl = 16,
-        push_vlan = 17,
-        pop_vlan = 18,
-        push_mpls = 19,
-        pop_mpls = 20,
-        set_queue = 21,
-        group = 22,
-        set_nw_ttl = 23,
-        dec_nw_ttl = 24,
-        experimenter = 0xffff,
-        bsn_mirror = 0xffff,
-        bsn_set_tunnel_dst = 0xffff,
-        nicira_dec_ttl = 0xffff
-        ),
-
-    # version 1.2
-    of_g.VERSION_1_2:dict(
-        output       = 0,
-        copy_ttl_out = 11,
-        copy_ttl_in  = 12,
-        set_mpls_ttl = 15,
-        dec_mpls_ttl = 16,
-        push_vlan    = 17,
-        pop_vlan     = 18,
-        push_mpls    = 19,
-        pop_mpls     = 20,
-        set_queue    = 21,
-        group        = 22,
-        set_nw_ttl   = 23,
-        dec_nw_ttl   = 24,
-        set_field    = 25,
-        experimenter = 0xffff,
-        bsn_mirror = 0xffff,
-        bsn_set_tunnel_dst = 0xffff,
-        nicira_dec_ttl = 0xffff
-        ),
-
-    # version 1.3
-    of_g.VERSION_1_3:of_1_3_action_types
-
-    }
-
-action_id_types = {
-    # version 1.0
     of_g.VERSION_1_0:dict(),
     of_g.VERSION_1_1:dict(),
     of_g.VERSION_1_2:dict(),
-    of_g.VERSION_1_3:of_1_3_action_types
+    of_g.VERSION_1_3:dict()
+    }
+
+instruction_id_types = {
+    of_g.VERSION_1_0:dict(),
+    of_g.VERSION_1_1:dict(),
+    of_g.VERSION_1_2:dict(),
+    of_g.VERSION_1_3:dict()
+    }
+
+action_types = {
+    of_g.VERSION_1_0:dict(),
+    of_g.VERSION_1_1:dict(),
+    of_g.VERSION_1_2:dict(),
+    of_g.VERSION_1_3:dict(),
+    }
+
+action_id_types = {
+    of_g.VERSION_1_0:dict(),
+    of_g.VERSION_1_1:dict(),
+    of_g.VERSION_1_2:dict(),
+    of_g.VERSION_1_3:dict(),
     }
 
 queue_prop_types = {
+    of_g.VERSION_1_0:dict(),
+    of_g.VERSION_1_1:dict(),
+    of_g.VERSION_1_2:dict(),
+    of_g.VERSION_1_3:dict()
+    }
+
+bsn_vport_types = {
     # version 1.0
     of_g.VERSION_1_0:dict(
-        min_rate      = 1,
-        # experimenter  = 0xffff
+        q_in_q      = 0,
         ),
     # version 1.1
     of_g.VERSION_1_1:dict(
-        min_rate      = 1,
-        #  experimenter  = 0xffff
+        q_in_q      = 0,
         ),
     # version 1.2
     of_g.VERSION_1_2:dict(
-        min_rate      = 1,
-        max_rate      = 2,
-        experimenter  = 0xffff
+        q_in_q      = 0,
         ),
     # version 1.3
     of_g.VERSION_1_3:dict(
-        min_rate      = 1,
-        max_rate      = 2,
-        experimenter  = 0xffff
+        q_in_q      = 0,
         )
     }
 
 oxm_types = {
-    # version 1.0
     of_g.VERSION_1_0:dict(),
-
-    # version 1.1
     of_g.VERSION_1_1:dict(),
-
-    # version 1.2
-    of_g.VERSION_1_2:oxm.oxm_wire_type,
-
-    # version 1.3
-    of_g.VERSION_1_3:oxm.oxm_wire_type  # FIXME needs update for 1.3?
+    of_g.VERSION_1_2:dict(),
+    of_g.VERSION_1_3:dict(),
     }
 
 hello_elem_types = {
-    # version 1.0
     of_g.VERSION_1_0:dict(),
-
-    # version 1.1
     of_g.VERSION_1_1:dict(),
-
-    # version 1.2
     of_g.VERSION_1_2:dict(),
-
-    # version 1.3
-    of_g.VERSION_1_3:dict(
-        versionbitmap = 1
-        )
+    of_g.VERSION_1_3:dict(),
     }
 
 table_feature_prop_types = {
-    # version 1.0
     of_g.VERSION_1_0:dict(),
-
-    # version 1.1
     of_g.VERSION_1_1:dict(),
-
-    # version 1.2
     of_g.VERSION_1_2:dict(),
-
-    # version 1.3
-    of_g.VERSION_1_3:dict(
-        instructions           = 0,
-        instructions_miss      = 1,
-        next_tables            = 2,
-        next_tables_miss       = 3,
-        write_actions          = 4,
-        write_actions_miss     = 5,
-        apply_actions          = 6,
-        apply_actions_miss     = 7,
-        match                  = 8,
-        wildcards              = 10,
-        write_setfield         = 12,
-        write_setfield_miss    = 13,
-        apply_setfield         = 14,
-        apply_setfield_miss    = 15,
-#        experimenter           = 0xFFFE,
-#        experimenter_miss      = 0xFFFF,
-        experimenter            = 0xFFFF,  # Wrong: should be experimenter_miss
-        )
+    of_g.VERSION_1_3:dict(),
     }
 
 meter_band_types = {
-    # version 1.0
     of_g.VERSION_1_0:dict(),
-
-    # version 1.1
     of_g.VERSION_1_1:dict(),
-
-    # version 1.2
     of_g.VERSION_1_2:dict(),
+    of_g.VERSION_1_3:dict(),
+    }
 
-    # version 1.3
-    of_g.VERSION_1_3:dict(
-        drop                   = 1,
-        dscp_remark            = 2,
-        experimenter           = 0xFFFF,
-        )
+bsn_tlv_types = {
+    of_g.VERSION_1_0:dict(),
+    of_g.VERSION_1_1:dict(),
+    of_g.VERSION_1_2:dict(),
+    of_g.VERSION_1_3:dict(),
     }
 
 # All inheritance data for non-messages
 inheritance_data = dict(
     of_instruction = instruction_types,
+    of_instruction_id = instruction_id_types,
     of_action = action_types,
     of_action_id = action_id_types,
     of_oxm = oxm_types,
     of_queue_prop = queue_prop_types,
     of_hello_elem = hello_elem_types,
     of_table_feature_prop = table_feature_prop_types,
-    of_meter_band = meter_band_types
+    of_meter_band = meter_band_types,
+    # BSN specific inheritance extensions
+    of_bsn_vport = bsn_vport_types,
+    of_bsn_tlv = bsn_tlv_types,
     )
 
-################################################################
-# Now generate the maps from parent to list of subclasses
-################################################################
-
-# # These lists have entries which are a fixed type, no inheritance
-# fixed_lists = [
-#     "of_list_bucket",
-#     "of_list_bucket_counter",
-#     "of_list_flow_stats_entry",
-#     "of_list_group_desc_stats_entry",
-#     "of_list_group_stats_entry",
-#     "of_list_packet_queue",
-#     "of_list_port_desc",
-#     "of_list_port_stats_entry",
-#     "of_list_queue_stats_entry",
-#     "of_list_table_stats_entry"
-#     ]
-
-# for cls in fixed_lists:
-#     base_type = list_to_entry_type(cls)
-#     of_g.inheritance_map[base_type] = [base_type]
-
-inheritance_map = dict()
-for parent, versioned in inheritance_data.items():
-    inheritance_map[parent] = set()
-    for ver, subclasses in versioned.items():
-        for subcls in subclasses:
-            inheritance_map[parent].add(subcls)
-
 def class_is_virtual(cls):
     """
     Returns True if cls is a virtual class
     """
-    if cls in inheritance_map:
-        return True
     if cls.find("header") > 0:
         return True
     if loxi_utils.class_is_list(cls):
         return True
-    return False
+    return loxi_globals.unified.class_by_name(cls).virtual
 
 ################################################################
 #
@@ -366,124 +175,45 @@
 #
 ################################################################
 
+# The hardcoded message types are for inheritance parents
 message_types = {
     # version 1.0
     of_g.VERSION_1_0:dict(
-        hello                   = 0,
         error_msg               = 1,
-        echo_request            = 2,
-        echo_reply              = 3,
         experimenter            = 4,
-        features_request        = 5,
-        features_reply          = 6,
-        get_config_request      = 7,
-        get_config_reply        = 8,
-        set_config              = 9,
-        packet_in               = 10,
-        flow_removed            = 11,
-        port_status             = 12,
-        packet_out              = 13,
         flow_mod                = 14,
-        port_mod                = 15,
         stats_request           = 16,
         stats_reply             = 17,
-        barrier_request         = 18,
-        barrier_reply           = 19,
-        queue_get_config_request = 20,
-        queue_get_config_reply  = 21,
-        table_mod               = 22    # Unofficial 1.0 extension
         ),
 
     # version 1.1
     of_g.VERSION_1_1:dict(
-        hello                   = 0,
         error_msg               = 1,
-        echo_request            = 2,
-        echo_reply              = 3,
         experimenter            = 4,
-        features_request        = 5,
-        features_reply          = 6,
-        get_config_request      = 7,
-        get_config_reply        = 8,
-        set_config              = 9,
-        packet_in               = 10,
-        flow_removed            = 11,
-        port_status             = 12,
-        packet_out              = 13,
         flow_mod                = 14,
         group_mod               = 15,
-        port_mod                = 16,
-        table_mod               = 17,
         stats_request           = 18,
         stats_reply             = 19,
-        barrier_request         = 20,
-        barrier_reply           = 21,
-        queue_get_config_request = 22,
-        queue_get_config_reply  = 23
         ),
 
     # version 1.2
     of_g.VERSION_1_2:dict(
-        hello                   = 0,
         error_msg               = 1,
-        echo_request            = 2,
-        echo_reply              = 3,
         experimenter            = 4,
-        features_request        = 5,
-        features_reply          = 6,
-        get_config_request      = 7,
-        get_config_reply        = 8,
-        set_config              = 9,
-        packet_in               = 10,
-        flow_removed            = 11,
-        port_status             = 12,
-        packet_out              = 13,
         flow_mod                = 14,
         group_mod               = 15,
-        port_mod                = 16,
-        table_mod               = 17,
         stats_request           = 18,
         stats_reply             = 19,
-        barrier_request         = 20,
-        barrier_reply           = 21,
-        queue_get_config_request = 22,
-        queue_get_config_reply   = 23,
-        role_request            = 24,
-        role_reply              = 25,
         ),
 
     # version 1.3
     of_g.VERSION_1_3:dict(
-        hello                   = 0,
         error_msg               = 1,
-        echo_request            = 2,
-        echo_reply              = 3,
         experimenter            = 4,
-        features_request        = 5,
-        features_reply          = 6,
-        get_config_request      = 7,
-        get_config_reply        = 8,
-        set_config              = 9,
-        packet_in               = 10,
-        flow_removed            = 11,
-        port_status             = 12,
-        packet_out              = 13,
         flow_mod                = 14,
         group_mod               = 15,
-        port_mod                = 16,
-        table_mod               = 17,
         stats_request           = 18,  # FIXME Multipart
         stats_reply             = 19,
-        barrier_request         = 20,
-        barrier_reply           = 21,
-        queue_get_config_request = 22,
-        queue_get_config_reply   = 23,
-        role_request            = 24,
-        role_reply              = 25,
-        async_get_request       = 26,
-        async_get_reply         = 27,
-        async_set               = 28,
-        meter_mod               = 29
         )
     }
 
@@ -549,7 +279,11 @@
         meter_features = 11,
         table_features = 12,
         port_desc = 13,
-        experimenter = 0xffff
+        experimenter = 0xffff,
+        bsn_lacp = 0xffff,
+        bsn_switch_pipeline = 0xffff,
+        bsn_port_counter = 0xffff,
+        bsn_vlan_counter = 0xffff
         )
     }
 
@@ -633,10 +367,36 @@
         )
     }
 
+group_mod_types = {
+    # version 1.0
+    of_g.VERSION_1_0:dict(),
+
+    # version 1.1
+    of_g.VERSION_1_1:dict(
+        add = 0,
+        modify = 1,
+        delete = 2
+        ),
+
+    # version 1.2
+    of_g.VERSION_1_2:dict(
+        add = 0,
+        modify = 1,
+        delete = 2
+        ),
+
+    # version 1.3
+    of_g.VERSION_1_3:dict(
+        add = 0,
+        modify = 1,
+        delete = 2
+        )
+    }
+
 ##
 # These are the objects whose length is specified by an external
 # reference, specifically another data member in the class.
-# 
+#
 #external_length_spec = {
 #    ("of_packet_out", "actions", OF_VERSION_1_0) : "actions_len",
 #    ("of_packet_out", "actions", OF_VERSION_1_1) : "actions_len",
@@ -647,27 +407,35 @@
 
 ################################################################
 #
-# type_val is the primary data structure that maps an 
+# type_val is the primary data structure that maps an
 # (class_name, version) pair to the wire data type value
 #
 ################################################################
 
 type_val = dict()
+inheritance_map = dict()
 
-for version, classes in message_types.items():
-    for cls in classes:
-        name = "of_" + cls
-        type_val[(name, version)] = classes[cls]
+def generate_maps():
+    for parent, versioned in inheritance_data.items():
+        inheritance_map[parent] = set()
+        for ver, subclasses in versioned.items():
+            for subcls in subclasses:
+                inheritance_map[parent].add(subcls)
 
-for parent, versioned in inheritance_data.items():
-    for version, subclasses in versioned.items():
-        for subcls, value in subclasses.items():
-            name = parent + "_" + subcls
-            type_val[(name, version)] = value
+    for version, classes in message_types.items():
+        for cls in classes:
+            name = "of_" + cls
+            type_val[(name, version)] = classes[cls]
 
-# Special case OF-1.2 match type
-type_val[("of_match_v3", of_g.VERSION_1_2)] = 0x8000
-type_val[("of_match_v3", of_g.VERSION_1_3)] = 0x8000
+    for parent, versioned in inheritance_data.items():
+        for version, subclasses in versioned.items():
+            for subcls, value in subclasses.items():
+                name = parent + "_" + subcls
+                type_val[(name, version)] = value
+
+    # Special case OF-1.2 match type
+    type_val[("of_match_v3", of_g.VERSION_1_2)] = 1
+    type_val[("of_match_v3", of_g.VERSION_1_3)] = 1
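The map construction is now deferred into generate_maps(). A self-contained illustration of the type_val population step, with a trimmed copy of the message table above; the version constant and values are copied from this file for illustration only:

    VERSION_1_0 = 1
    message_types = {VERSION_1_0: dict(error_msg=1, experimenter=4, flow_mod=14,
                                       stats_request=16, stats_reply=17)}

    type_val = {}
    for version, classes in message_types.items():
        for cls, value in classes.items():
            type_val[("of_" + cls, version)] = value

    assert type_val[("of_flow_mod", VERSION_1_0)] == 14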
 
 # Utility function
 def dict_to_array(d, m_val, def_val=-1):
@@ -697,7 +465,7 @@
     Given versioned information about a type, calculate how long
     the unified array should be.
 
-    @param version_indexed A dict indexed by version. Each value is a 
+    @param version_indexed A dict indexed by version. Each value is a
     dict indexed by a name and whose value is an integer
     @param max_val Ignore values greater than this for length calcs
     """
@@ -709,52 +477,6 @@
             arr_len = len(ar)
     return arr_len
 
-# FIXME:  Need to move update for multipart messages
-
-stats_reply_list = [
-    "of_aggregate_stats_reply",
-    "of_desc_stats_reply",
-    "of_experimenter_stats_reply",
-    "of_flow_stats_reply",
-    "of_group_stats_reply",
-    "of_group_desc_stats_reply",
-    "of_group_features_stats_reply",
-    "of_meter_stats_reply",
-    "of_meter_config_stats_reply",
-    "of_meter_features_stats_reply",
-    "of_port_stats_reply",
-    "of_port_desc_stats_reply",
-    "of_queue_stats_reply",
-    "of_table_stats_reply",
-    "of_table_features_stats_reply"
-]
-
-stats_request_list = [
-    "of_aggregate_stats_request",
-    "of_desc_stats_request",
-    "of_experimenter_stats_request",
-    "of_flow_stats_request",
-    "of_group_stats_request",
-    "of_group_desc_stats_request",
-    "of_group_features_stats_request",
-    "of_meter_stats_request",
-    "of_meter_config_stats_request",
-    "of_meter_features_stats_request",
-    "of_port_stats_request",
-    "of_port_desc_stats_request",
-    "of_queue_stats_request",
-    "of_table_stats_request",
-    "of_table_features_stats_request"
-]
-
-flow_mod_list = [
-    "of_flow_add",
-    "of_flow_modify",
-    "of_flow_modify_strict",
-    "of_flow_delete",
-    "of_flow_delete_strict"
-]
-
 def sub_class_map(base_type, version):
     """
     Returns an iterable object giving the instance names and subclass types
@@ -784,52 +506,20 @@
     # version 1.0
     of_g.VERSION_1_0:dict(  # Version 1.0 extensions
         bsn = {   # BSN extensions; indexed by class name, value is subtype
-            "of_bsn_set_ip_mask"             : 0,
-            "of_bsn_get_ip_mask_request"     : 1,
-            "of_bsn_get_ip_mask_reply"       : 2,
-            "of_bsn_set_mirroring"           : 3,
-            "of_bsn_get_mirroring_request"   : 4,
-            "of_bsn_get_mirroring_reply"     : 5,
-            "of_bsn_shell_command"           : 6,
-            "of_bsn_shell_output"            : 7,
-            "of_bsn_shell_status"            : 8,
-            "of_bsn_get_interfaces_request"  : 9,
-            "of_bsn_get_interfaces_reply"    : 10,
-            "of_bsn_set_pktin_suppression"   : 11,
             },
         nicira = {   # Nicira extensions, value is subtype
-            "of_nicira_controller_role_request"      : 10,
-            "of_nicira_controller_role_reply"        : 11,
             },
         ),
     of_g.VERSION_1_1:dict(  # Version 1.0 extensions
         bsn = {   # BSN extensions; indexed by class name, value is subtype
-            "of_bsn_set_mirroring"           : 3,
-            "of_bsn_get_mirroring_request"   : 4,
-            "of_bsn_get_mirroring_reply"     : 5,
-            "of_bsn_get_interfaces_request"  : 9,
-            "of_bsn_get_interfaces_reply"    : 10,
-            "of_bsn_set_pktin_suppression"   : 11,
             },
         ),
     of_g.VERSION_1_2:dict(  # Version 1.0 extensions
         bsn = {   # BSN extensions; indexed by class name, value is subtype
-            "of_bsn_set_mirroring"           : 3,
-            "of_bsn_get_mirroring_request"   : 4,
-            "of_bsn_get_mirroring_reply"     : 5,
-            "of_bsn_get_interfaces_request"  : 9,
-            "of_bsn_get_interfaces_reply"    : 10,
-            "of_bsn_set_pktin_suppression"   : 11,
             },
         ),
     of_g.VERSION_1_3:dict(  # Version 1.0 extensions
         bsn = {   # BSN extensions; indexed by class name, value is subtype
-            "of_bsn_set_mirroring"           : 3,
-            "of_bsn_get_mirroring_request"   : 4,
-            "of_bsn_get_mirroring_reply"     : 5,
-            "of_bsn_get_interfaces_request"  : 9,
-            "of_bsn_get_interfaces_reply"    : 10,
-            "of_bsn_set_pktin_suppression"   : 11,
             },
         ),
 }
@@ -840,38 +530,26 @@
     # version 1.0
     of_g.VERSION_1_0:dict(  # Version 1.0 extensions
         bsn = {   # of_action_bsn_
-            "of_action_bsn_mirror"           : 1,
-            "of_action_bsn_set_tunnel_dst"   : 2,
             },
         nicira = {   # of_action_nicira_
-            "of_action_nicira_dec_ttl"       : 18,
             }
         ),
     of_g.VERSION_1_1:dict(  # Version 1.0 extensions
         bsn = {   # of_action_bsn_
-            "of_action_bsn_mirror"           : 1,
-            "of_action_bsn_set_tunnel_dst"   : 2,
             },
         nicira = {   # of_action_nicira_
-            "of_action_nicira_dec_ttl"       : 18,
             }
         ),
     of_g.VERSION_1_2:dict(  # Version 1.0 extensions
         bsn = {   # of_action_bsn_
-            "of_action_bsn_mirror"           : 1,
-            "of_action_bsn_set_tunnel_dst"   : 2,
             },
         nicira = {   # of_action_nicira_
-            "of_action_nicira_dec_ttl"       : 18,
             }
         ),
     of_g.VERSION_1_3:dict(  # Version 1.0 extensions
         bsn = {   # of_action_bsn_
-            "of_action_bsn_mirror"           : 1,
-            "of_action_bsn_set_tunnel_dst"   : 2,
             },
         nicira = {   # of_action_nicira_
-            "of_action_nicira_dec_ttl"       : 18,
             }
         ),
 }
@@ -885,17 +563,25 @@
     of_g.VERSION_1_2:dict(),
     of_g.VERSION_1_3:dict(  # Version 1.3 extensions
         bsn = {   # of_action_bsn_
-            "of_action_id_bsn_mirror"           : 1,
-            "of_action_id_bsn_set_tunnel_dst"   : 2,
             },
         nicira = {   # of_action_nicira_
-            "of_action_id_nicira_dec_ttl"       : 18,
             }
         ),
 }
 
 # Set to empty dict if no extension instructions defined
-extension_instruction_subtype = {}
+extension_instruction_subtype = {
+    # version 1.0
+    of_g.VERSION_1_0:dict(),
+    of_g.VERSION_1_1:dict(),
+    of_g.VERSION_1_2:dict(),
+    of_g.VERSION_1_3:dict(
+        bsn = {   # of_instruction_bsn_
+            },
+        nicira = {   # of_instruction_nicira_
+            }
+        ),
+}
 
 # Set to empty dict if no extension instructions defined
 extension_queue_prop_subtype = {}
@@ -923,7 +609,7 @@
 
     This is brute force; we search all extension data for a match
     """
-    
+
     for ext_obj in extension_objects:
         for version, exp_list in ext_obj.items():
             for exp_name, classes in exp_list.items():
@@ -998,7 +684,7 @@
                     return True
 
     return False
-    
+
 ################################################################
 # These are extension message specific
 ################################################################
diff --git a/c_gen/util.py b/c_gen/util.py
index d4b25bf..54e0f80 100644
--- a/c_gen/util.py
+++ b/c_gen/util.py
@@ -30,12 +30,13 @@
 """
 import os
 import loxi_utils.loxi_utils as utils
+import template_utils as template_utils
 
 templates_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
 template_path = [templates_dir, templates_dir + '/locitest']
 
 def render_template(out, name, **context):
-    utils.render_template(out, name, template_path, context)
+    template_utils.render_template(out, name, template_path, context)
 
 def render_static(out, name):
-    utils.render_static(out, name, template_path)
+    template_utils.render_static(out, name, template_path)
diff --git a/cmdline.py b/cmdline.py
new file mode 100644
index 0000000..aafa019
--- /dev/null
+++ b/cmdline.py
@@ -0,0 +1,108 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import sys
+
+from optparse import OptionParser
+
+from loxi_globals import OFVersions
+
+##
+# Command line options
+options = {}
+
+##
+# Command line arguments
+args = []
+
+##@var config_default
+# The default configuration dictionary for LOXI code generation
+options_default = {
+    "lang"               : "c",
+    "version-list"       : "1.0 1.1 1.2 1.3",
+    "install-dir"        : "loxi_output",
+}
+
+def lang_normalize(lang):
+    """
+    Normalize the representation of the language
+    """
+    return lang.lower()
+
+def version_list_normalize(vlist):
+    """
+    Normalize the version list and return as an array
+    """
+    out_list = []
+    # @fixme Map to OF version references
+    if vlist.find(',') > 0:
+        vlist = vlist.split(',')
+    else:
+        vlist = vlist.split()
+    vlist.sort()
+    for ver in vlist:
+        try:
+            out_list.append(OFVersions.from_string(ver))
+        except KeyError:
+            sys.stderr.write("Bad version input, %s\n" % str(ver))
+            sys.exit(1)
+    return out_list
+
+def process_commandline(default_vals=options_default):
+    """
+    Set up the options dictionary
+
+    @param cfg_dflt The default configuration dictionary
+    @return A pair (options, args) as per parser return
+    """
+    global options
+    global args
+    global target_version_list
+
+    parser = OptionParser(version="%prog 0.1")
+
+    #@todo Add options via dictionary
+    parser.add_option("--list-files", action="store_true", default=False,
+                      help="List output files generated")
+    parser.add_option("-l", "--lang", "--language",
+                      default=default_vals["lang"],
+                      help="Select the target language: c, python")
+    parser.add_option("-i", "--install-dir",
+                      default=default_vals["install-dir"],
+                      help="Directory to install generated files to (default %s)" % default_vals["install-dir"])
+    parser.add_option("-v", "--verbose",
+                      action="store_true", default=False,
+                      help="Debug output")
+
+    parser.add_option("-V", "--version-list",
+                      default=default_vals["version-list"],
+                      help="Specify the versions to target as 1.0 1.1 etc")
+
+    (options, args) = parser.parse_args()
+
+    options.lang = lang_normalize(options.lang)
+    target_version_list = version_list_normalize(options.version_list)
+    target_version_list.sort()
+    return (options, args, target_version_list)
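A usage sketch for this module, assuming it is importable as cmdline from a loxigen checkout; process_commandline() parses sys.argv and returns the parsed options, the positional arguments, and the normalized, sorted target version list:

    import cmdline

    options, args, versions = cmdline.process_commandline()
    print(options.lang)         # normalized to lower case, default "c"
    print(options.install_dir)  # default "loxi_output"
    print(versions)             # list of OFVersions entries, sorted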
diff --git a/generic_utils.py b/generic_utils.py
index cebfb7f..1cfba86 100644
--- a/generic_utils.py
+++ b/generic_utils.py
@@ -30,28 +30,10 @@
 
 Intended to be imported into another namespace
 """
-
+import logging
+import collections
+import functools
 import sys
-import of_g
-
-
-################################################################
-#
-# Configuration related
-#
-################################################################
-
-def config_check(str, dictionary = of_g.code_gen_config):
-    """
-    Return config value if in dictionary; else return False.
-    @param str The lookup index
-    @param dictionary The dict to check; use code_gen_config if None
-    """
-
-    if str in dictionary:
-        return dictionary[str]
-
-    return False
 
 ################################################################
 #
@@ -61,15 +43,174 @@
 
 def debug(obj):
     """
-    Debug output to the current both the log file and debug output
-    @param out_str The stringified output to write
+    Legacy logging method. Delegate to logging.debug.
+    Use logging.debug directly in the future.
     """
-    of_g.loxigen_dbg_file.write(str(obj) + "\n")
-    log(obj)
+    logging.debug(obj)
 
 def log(obj):
     """
-    Log output to the current global log file
-    @param out_str The stringified output to write
+    Legacy logging method. Delegate to logging.info.
+    Use logging.info directly in the future.
     """
-    of_g.loxigen_log_file.write(str(obj) + "\n")
+    logging.info(obj)
+
+################################################################
+#
+# Memoize
+#
+################################################################
+
+def memoize(obj):
+    """ A function/method decorator that memoizes the result"""
+    cache = obj.cache = {}
+
+    @functools.wraps(obj)
+    def memoizer(*args, **kwargs):
+        key = args + tuple(kwargs.items())
+        if key not in cache:
+            cache[key] = obj(*args, **kwargs)
+        return cache[key]
+    return memoizer
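A quick usage example for the decorator above; results are cached on the wrapped function, keyed by the argument combination:

    calls = []

    @memoize
    def slow_square(x):
        calls.append(x)          # record real invocations
        return x * x

    assert slow_square(4) == 16
    assert slow_square(4) == 16  # second call served from slow_square.cache
    assert calls == [4]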
+
+################################################################
+#
+# OrderedSet
+#
+################################################################
+
+class OrderedSet(collections.MutableSet):
+    """
+    A set implementation that retains insertion order.  From the recipe
+    http://code.activestate.com/recipes/576694/
+    as referred to in the Python documentation.
+    """
+
+    def __init__(self, iterable=None):
+        self.end = end = []
+        end += [None, end, end]         # sentinel node for doubly linked list
+        self.map = {}                   # key --> [key, prev, next]
+        if iterable is not None:
+            self |= iterable
+
+    def __len__(self):
+        return len(self.map)
+
+    def __contains__(self, key):
+        return key in self.map
+
+    def add(self, key):
+        if key not in self.map:
+            end = self.end
+            curr = end[1]
+            curr[2] = end[1] = self.map[key] = [key, curr, end]
+
+    def discard(self, key):
+        if key in self.map:
+            key, prev, next = self.map.pop(key)
+            prev[2] = next
+            next[1] = prev
+
+    def __iter__(self):
+        end = self.end
+        curr = end[2]
+        while curr is not end:
+            yield curr[0]
+            curr = curr[2]
+
+    def __reversed__(self):
+        end = self.end
+        curr = end[1]
+        while curr is not end:
+            yield curr[0]
+            curr = curr[1]
+
+    def pop(self, last=True):
+        if not self:
+            raise KeyError('set is empty')
+        key = self.end[1][0] if last else self.end[2][0]
+        self.discard(key)
+        return key
+
+    def __repr__(self):
+        if not self:
+            return '%s()' % (self.__class__.__name__,)
+        return '%s(%r)' % (self.__class__.__name__, list(self))
+
+    def __eq__(self, other):
+        if isinstance(other, OrderedSet):
+            return len(self) == len(other) and list(self) == list(other)
+        return set(self) == set(other)
+
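Usage example for OrderedSet: iteration follows insertion order, and pop() removes the most recently added element by default:

    s = OrderedSet("abracadabra")
    assert list(s) == ['a', 'b', 'r', 'c', 'd']   # duplicates dropped, order kept
    s.add('z')
    s.discard('b')
    assert list(s) == ['a', 'r', 'c', 'd', 'z']
    assert s.pop() == 'z'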
+################################################################
+#
+# OrderedDefaultDict
+#
+################################################################
+
+class OrderedDefaultDict(collections.OrderedDict):
+    """
+    A Dictionary that maintains insertion order where missing values
+    are provided by a factory function, i.e., a combination of
+    the semantics of collections.defaultdict and collections.OrderedDict.
+    """
+    def __init__(self, default_factory=None, *a, **kw):
+        if (default_factory is not None and
+                not callable(default_factory)):
+            raise TypeError('first argument must be callable')
+        collections.OrderedDict.__init__(self, *a, **kw)
+        self.default_factory = default_factory
+
+    def __getitem__(self, key):
+        try:
+            return collections.OrderedDict.__getitem__(self, key)
+        except KeyError:
+            return self.__missing__(key)
+
+    def __missing__(self, key):
+        if self.default_factory is None:
+            raise KeyError(key)
+        self[key] = value = self.default_factory()
+        return value
+
+    def __reduce__(self):
+        if self.default_factory is None:
+            args = tuple()
+        else:
+            args = self.default_factory,
+        return type(self), args, None, None, self.items()
+
+    def copy(self):
+        return self.__copy__()
+
+    def __copy__(self):
+        return type(self)(self.default_factory, self)
+
+    def __deepcopy__(self, memo):
+        import copy
+        return type(self)(self.default_factory,
+                          copy.deepcopy(self.items()))
+    def __repr__(self):
+        return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,
+                                        collections.OrderedDict.__repr__(self))
+
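Usage example for OrderedDefaultDict: missing keys are created by the factory while key order is preserved:

    d = OrderedDefaultDict(list)
    d['actions'].append('output')          # key created by the list factory
    d['instructions'].append('goto_table')
    d['actions'].append('group')
    assert list(d.keys()) == ['actions', 'instructions']
    assert d['actions'] == ['output', 'group']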
+
+def find(func, iterable):
+    """
+    Find the first item in iterable for which func returns something truthy.
+    @returns None if no item in iterable fulfills the condition
+    """
+    for i in iterable:
+        if func(i):
+            return i
+    return None
+
+def count(func, iterable):
+    """
+    Count the number of items in iterable for which func returns something truthy.
+    """
+    c = 0
+    for i in iterable:
+        if func(i):
+            c += 1
+    return c
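Usage example for the two helpers above:

    items = [1, 4, 7, 10]
    assert find(lambda x: x > 5, items) == 7        # first match wins
    assert find(lambda x: x > 100, items) is None   # no match
    assert count(lambda x: x % 2 == 0, items) == 2  # 4 and 10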
diff --git a/java_gen/.classpath b/java_gen/.classpath
new file mode 100644
index 0000000..050cd9c
--- /dev/null
+++ b/java_gen/.classpath
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+	<classpathentry kind="lib" path="pre-written/lib/netty-3.2.6.Final.jar"/>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+	<classpathentry kind="output" path="bin"/>
+</classpath>
diff --git a/java_gen/.gitignore b/java_gen/.gitignore
new file mode 100644
index 0000000..84c1d8e
--- /dev/null
+++ b/java_gen/.gitignore
@@ -0,0 +1,2 @@
+*.class
+bin/
diff --git a/java_gen/.project b/java_gen/.project
new file mode 100644
index 0000000..b347bd6
--- /dev/null
+++ b/java_gen/.project
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>openflowj-loxi</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
+</projectDescription>
diff --git a/java_gen/README.java-lang b/java_gen/README.java-lang
new file mode 100644
index 0000000..754c13b
--- /dev/null
+++ b/java_gen/README.java-lang
@@ -0,0 +1 @@
+Work in progress to port OpenFlow/J to support OpenFlow 1.0, 1.1, 1.2, and 1.3.
diff --git a/java_gen/__init__.py b/java_gen/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/java_gen/__init__.py
diff --git a/java_gen/codegen.py b/java_gen/codegen.py
new file mode 100644
index 0000000..9a10bdf
--- /dev/null
+++ b/java_gen/codegen.py
@@ -0,0 +1,165 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+"""
+@brief Main Java Generation module
+"""
+
+import logging
+import pdb
+import os
+import shutil
+
+import loxi_globals
+from loxi_ir import *
+import lang_java
+import test_data
+from collections import namedtuple
+from import_cleaner import ImportCleaner
+
+import template_utils
+import loxi_utils.loxi_utils as loxi_utils
+
+import java_gen.java_model as java_model
+
+logger = logging.getLogger(__name__)
+
+def gen_all_java(install_dir):
+    basedir= '%s/openflowj' % install_dir
+    logger.info("Outputting to %s" % basedir)
+    if os.path.exists(basedir):
+        shutil.rmtree(basedir)
+    os.makedirs(basedir)
+    copy_prewrite_tree(basedir)
+    gen = JavaGenerator(basedir, JavaGeneratorOptions(instrument=True))
+    gen.create_of_interfaces()
+    gen.create_of_classes()
+    gen.create_of_const_enums()
+    gen.create_of_factories()
+
+JavaGeneratorOptions = namedtuple("JavaGeneratorOptions", ("instrument",))
+
+class JavaGenerator(object):
+    templates_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
+
+    def __init__(self, basedir, gen_opts):
+        self.basedir = basedir
+        self.java_model = java_model.model
+        self.gen_opts = gen_opts
+
+    def render_class(self, clazz, template, src_dir=None, **context):
+        if not src_dir:
+            src_dir = "gen-src/main/java/"
+
+        context['class_name'] = clazz.name
+        context['package'] = clazz.package
+        context['template_dir'] = self.templates_dir
+        context['genopts']= self.gen_opts
+
+        filename = os.path.join(self.basedir, src_dir, "%s/%s.java" % (clazz.package.replace(".", "/"), clazz.name))
+        dirname = os.path.dirname(filename)
+        if not os.path.exists(dirname):
+            os.makedirs(dirname)
+        prefix = '//::(?=[ \t]|$)'
+        logger.debug("rendering filename: %s" % filename)
+        with open(filename, "w") as f:
+            template_utils.render_template(f, template, [self.templates_dir], context, prefix=prefix)
+
+        try:
+            cleaner = ImportCleaner(filename)
+            cleaner.find_used_imports()
+            cleaner.rewrite_file(filename)
+        except Exception:
+            logger.info('Cannot clean imports from file %s' % filename)
+
+    def create_of_const_enums(self):
+        for enum in self.java_model.enums:
+            if enum.name in ["OFPort"]:
+                continue
+            self.render_class(clazz=enum,
+                    template='const.java', enum=enum, all_versions=self.java_model.versions)
+
+            for version in enum.versions:
+                clazz = java_model.OFGenericClass(package="org.projectfloodlight.openflow.protocol.ver{}".format(version.dotless_version), name="{}SerializerVer{}".format(enum.name, version.dotless_version))
+
+                if enum.is_bitmask:
+                    self.render_class(clazz=clazz, template="const_set_serializer.java", enum=enum, version=version)
+                else:
+                    self.render_class(clazz=clazz, template="const_serializer.java", enum=enum, version=version)
+
+    def create_of_interfaces(self):
+        """ Create the base interfaces for of classes"""
+        for interface in self.java_model.interfaces:
+            #if not utils.class_is_message(interface.c_name):
+            #    continue
+            self.render_class(clazz=interface,
+                    template="of_interface.java", msg=interface)
+
+    def create_of_classes(self):
+        """ Create the OF classes with implementations for each of the interfaces and versions """
+        for interface in self.java_model.interfaces:
+            for java_class in interface.versioned_classes:
+                if self.java_model.generate_class(java_class):
+                    if not java_class.is_virtual:
+                        self.render_class(clazz=java_class,
+                                template='of_class.java', version=java_class.version, msg=java_class,
+                                impl_class=java_class.name)
+
+                        self.create_unit_test(java_class.unit_test)
+                    else:
+                        disc = java_class.discriminator
+                        if disc:
+                            self.render_class(clazz=java_class,
+                                template='of_virtual_class.java', version=java_class.version, msg=java_class,
+                                impl_class=java_class.name, model=self.java_model)
+                        else:
+                            logger.warn("Class %s virtual but no discriminator" % java_class.name)
+                else:
+                    logger.info("Class %s ignored by generate_class" % java_class.name)
+
+    def create_unit_test(self, unit_tests):
+        if unit_tests.has_test_data:
+            for i in range(unit_tests.length):
+                unit_test = unit_tests.get_test_unit(i)
+                if unit_test.has_test_data:
+                    self.render_class(clazz=unit_test,
+                            template='unit_test.java', src_dir="gen-src/test/java",
+                            version=unit_test.java_class.version,
+                            test=unit_test, msg=unit_test.java_class,
+                            test_data=unit_test.test_data)
+
+    def create_of_factories(self):
+        for factory in self.java_model.of_factories:
+            self.render_class(clazz=factory, template="of_factory_interface.java", factory=factory)
+            for factory_class in factory.factory_classes:
+                self.render_class(clazz=factory_class, template="of_factory_class.java", factory=factory_class, model=self.java_model)
+            self.render_class(clazz=java_model.OFGenericClass(package="org.projectfloodlight.openflow.protocol", name="OFFactories"), template="of_factories.java", versions=self.java_model.versions)
+
+def copy_prewrite_tree(basedir):
+    """ Recursively copy the directory structure from ./java_gen/pre-write
+       into $basedir"""
+    logger.info("Copying pre-written files into %s" % basedir)
diff --git a/java_gen/import_cleaner.py b/java_gen/import_cleaner.py
new file mode 100755
index 0000000..d84d76b
--- /dev/null
+++ b/java_gen/import_cleaner.py
@@ -0,0 +1,107 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+# A utility for naively removing redundant 'import' statements from a Java source file.
+# It searches the code for the name of each imported class and, if the name is not
+# found, removes the corresponding import statement.
+# The utility assumes class/package naming that follows Java naming conventions.
+
+import sys
+import re
+
+class ImportLine:
+    def __init__(self, line):
+        self.line = line
+        class_name = None
+        if line[len(line) - 1] == '*':
+            class_name = '*'
+        else:
+            i = 7
+            while i < len(line) - 1:
+                if re.match('\.[A-Z][\..]*$', line[i - 1 : len(line) - 1]):
+                    class_name = line[i : len(line) - 1]
+                    break
+                i = i + 1
+            if class_name is None:
+                class_name = line[line.rfind('.') + 1 : len(line) - 1]
+        self.class_name = class_name
+
+
+class ImportCleaner:
+    def __init__(self, path):
+        f = open(path)
+        self.imp_lines = []
+        self.code_lines = []
+        self.imports_first_line = -1
+        i = 0
+        for line in f:
+            if len(line) > 6 and re.match('^[ \t]*import ', line):
+                self.imp_lines.append(ImportLine(line.rstrip()))
+                if self.imports_first_line == -1:
+                    self.imports_first_line = i
+            else:
+                self.code_lines.append(line.rstrip())
+            i = i + 1
+        f.close()
+
+    def find_used_imports(self):
+        self.used_imports = []
+        for line in self.code_lines:
+            temp = []
+            for imp in self.imp_lines:
+                if imp.class_name == '*' or line.find(imp.class_name) > -1:
+                    temp.append(imp)
+            for x in temp:
+                self.imp_lines.remove(x)
+                self.used_imports.append(x)
+
+    def rewrite_file(self, path):
+        f = open(path, 'w')
+        imports_written = False
+        for i in range(len(self.code_lines)):
+            if not imports_written and self.imports_first_line == i:
+                # Put all imports
+                for imp in self.used_imports:
+                    f.write(imp.line + '\n')
+                imports_written = True
+            # Put next code line
+            f.write(self.code_lines[i] + '\n')
+        f.close()
+
+def main(argv):
+    if len(argv) != 2:
+        print 'Usage: ImportCleaner <java file>'
+        return
+
+    filename = argv[1]
+    print 'Cleaning imports from file %s' % (filename)
+    cleaner = ImportCleaner(filename)
+    cleaner.find_used_imports()
+    cleaner.rewrite_file(filename)
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/java_gen/java_model.py b/java_gen/java_model.py
new file mode 100644
index 0000000..8726632
--- /dev/null
+++ b/java_gen/java_model.py
@@ -0,0 +1,1140 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+# Prototype of an Intermediate Object model for the java code generator
+# A lot of this stuff could/should probably be merged with the python utilities
+
+import collections
+from collections import namedtuple, defaultdict, OrderedDict
+import logging
+import os
+import pdb
+import re
+
+from generic_utils import find, memoize, OrderedSet, OrderedDefaultDict
+from loxi_globals import OFVersions
+import loxi_globals
+from loxi_ir import *
+import loxi_utils.loxi_utils as loxi_utils
+import test_data
+
+import java_gen.java_type as java_type
+from java_gen.java_type import erase_type_annotation
+
+logger = logging.getLogger(__name__)
+
+
+def java_class_name(c_name):
+    return java_type.name_c_to_caps_camel(c_name) if c_name != "of_header" else "OFMessage"
+
+class JavaModel(object):
+    # registry for enums that should not be generated
+    # set(${java_enum_name})
+    enum_blacklist = set(("OFDefinitions", "OFPortNo", "OFVlanId", "OFGroup"))
+    # registry for enum *entry* that should not be generated
+    # map: ${java_enum_name} -> set(${java_enum_entry_name})
+    enum_entry_blacklist = defaultdict(lambda: set(), OFFlowWildcards=set([ "NW_DST_BITS", "NW_SRC_BITS", "NW_SRC_SHIFT", "NW_DST_SHIFT" ]))
+    # registry of interfaces that should not be generated
+    # set(java_names)
+    # OFUint structs are there for god-knows what in loci. We certainly don't need them.
+    interface_blacklist = set( ("OFUint8", "OFUint32",))
+    # registry of interface properties that should not be generated
+    # map: $java_type -> set(java_name_property)
+    read_blacklist = defaultdict(lambda: set(),
+        OFExperimenter=set(('data','subtype')),
+        OFActionExperimenter=set(('data',)),
+        OFExperimenterStatsRequest=set(('data','subtype')),
+        OFExperimenterStatsReply=set(('data','subtype')),
+        OFInstructionExperimenter=set(('data',)))
+    # map: $java_type -> set(java_name_property)
+    write_blacklist = defaultdict(
+        lambda: set(),
+        OFOxm=set(('typeLen',)),
+        OFAction=set(('type',)),
+        OFInstruction=set(('type',)),
+        OFFlowMod=set(('command', )),
+        OFExperimenter=set(('data','subtype')),
+        OFActionExperimenter=set(('data',)),
+        OFBsnTlv=set(('type',)))
+    # interfaces that are virtual
+    virtual_interfaces = set(['OFOxm', 'OFInstruction', 'OFFlowMod', 'OFBsnVport' ])
+
+    # Registry of nullable properties:
+    # ${java_class_name} -> set(${java_property_name})
+    nullable_map = defaultdict(lambda: set(),
+    )
+
+    # represents a subgroup of a bitmask enum that is actually a normal enumerable within a masked part of the enum
+    # e.g., the STP_* flags in the OF1.0 port state look like bitmask entries, but are really enumerables within the bits covered by the mask "STP_MASK"
+    # name: a name for the group
+    # mask: java name of the enum entry that defines the mask
+    # members: set of names of the members of the group
+    MaskedEnumGroup = namedtuple("MaskedEnumGroup", ("name", "mask", "members"))
+
+    # registry of MaskedEnumGroups (see above).
+    # map: ${java_enum_name}: tuple(MaskedEnumGroup)
+    masked_enum_groups = defaultdict(lambda: (),
+            OFPortState = (MaskedEnumGroup("stp_flags", mask="STP_MASK", members=set(("STP_LISTEN", "STP_LEARN", "STP_FORWARD", "STP_BLOCK"))), ),
+            OFConfigFlags = (
+                MaskedEnumGroup("frag_flags", mask="FRAG_MASK", members=set(("FRAG_NORMAL", "FRAG_DROP", "FRAG_REASM"))),
+            ),
+            OFTableConfig = (
+                MaskedEnumGroup("table_miss_flags", mask="TABLE_MISS_MASK", members=set(("TABLE_MISS_CONTROLLER", "TABLE_MISS_CONTINUE", "TABLE_MISS_DROP"))),
+            ),
+            OFGetConfigReply = (
+                MaskedEnumGroup("flags", mask="OFP_FRAG_MASK", members=set(("FRAG_NORMAL", "FRAG_DROP", "FRAG_REASM"))),
+            ),
+            OFSetConfig = (
+                MaskedEnumGroup("flags", mask="OFP_FRAG_MASK", members=set(("FRAG_NORMAL", "FRAG_DROP", "FRAG_REASM"))),
+            ),
+    )
+
+    # represents a metadata property associated with an EnumClass (see docstring below)
+    class OFEnumPropertyMetadata(namedtuple("OFEnumPropertyMetadata", ("name", "type", "value"))):
+        """
+        represents a metadata property associated with an Enum Class
+        @param name name of metadata property
+        @param type java_type instance describing the type
+        @param value generator function f(entry) that generates the value
+        """
+        @property
+        def variable_name(self):
+            return self.name[0].lower() + self.name[1:]
+
+        @property
+        def getter_name(self):
+            prefix = "is" if self.type == java_type.boolean else "get"
+            return prefix+self.name
+
+    """ Metadata container. """
+    OFEnumMetadata = namedtuple("OFEnumMetadata", ("properties", "to_string"))
+
+    def gen_port_speed(enum_entry):
+        """ Generator function for OFortFeatures.PortSpeed"""
+        splits = enum_entry.name.split("_")
+        if len(splits)>=2:
+            m = re.match(r'\d+[MGTP]B', splits[1])
+            if m:
+                return "PortSpeed.SPEED_{}".format(splits[1])
+        return "PortSpeed.SPEED_NONE";
+
+    def gen_stp_state(enum_entry):
+        """ Generator function for OFPortState.StpState"""
+        splits = enum_entry.name.split("_")
+        if len(splits)>=1:
+            if splits[0] == "STP":
+                return "true"
+        return "false"
+
+    # registry for metadata properties for enums
+    # map: ${java_enum_name}: OFEnumMetadata
+    enum_metadata_map = defaultdict(lambda: JavaModel.OFEnumMetadata((), None),
+            OFPortFeatures = OFEnumMetadata((OFEnumPropertyMetadata("PortSpeed", java_type.port_speed, gen_port_speed),), None),
+            OFPortState = OFEnumMetadata((OFEnumPropertyMetadata("StpState", java_type.boolean, gen_stp_state),), None),
+    )
+
+    @property
+    @memoize
+    def versions(self):
+        return OrderedSet( JavaOFVersion(ir_version) for ir_version in OFVersions.target_versions)
+
+    @property
+    @memoize
+    def interfaces(self):
+        interfaces = [ JavaOFInterface(ir_class) for ir_class in loxi_globals.unified.classes ]
+        interfaces = [ i for i in interfaces if i.name not in self.interface_blacklist ]
+
+        return interfaces
+
+    @memoize
+    def interface_by_name(self, name):
+        return find(lambda i: erase_type_annotation(i.name) == erase_type_annotation(name), self.interfaces)
+
+    @property
+    @memoize
+    def all_classes(self):
+        return [clazz for interface in self.interfaces for clazz in interface.versioned_classes]
+
+    @property
+    @memoize
+    def enums(self):
+        name_version_enum_map = OrderedDefaultDict(lambda: OrderedDict())
+
+        for version in self.versions:
+            logger.info("version: {}".format(version.ir_version))
+            of_protocol = loxi_globals.ir[version.ir_version]
+            for enum in of_protocol.enums:
+                name_version_enum_map[enum.name][version] = enum
+
+        enums = [ JavaEnum(name, version_enum_map) for name, version_enum_map,
+                        in name_version_enum_map.items() ]
+
+        # inelegant - need java name here
+        enums = [ enum for enum in enums if enum.name not in self.enum_blacklist ]
+        return enums
+
+    @memoize
+    def enum_by_name(self, name):
+        res = find(lambda e: e.name == name, self.enums)
+        if not res:
+            raise KeyError("Could not find enum with name %s" % name)
+        return res
+
+    @property
+    @memoize
+    def of_factories(self):
+        prefix = "org.projectfloodlight.openflow.protocol"
+
+        factories = OrderedDict()
+
+        sub_factory_classes = ("OFAction", "OFInstruction", "OFMeterBand", "OFOxm", "OFQueueProp", "OFErrorMsg", "OFActionId", "OFInstructionId", "OFBsnTlv")
+        for base_class in sub_factory_classes:
+            package = base_class[2:].lower()
+            remove_prefix = base_class[2].lower() + base_class[3:]
+
+            # HACK need to have a better way to deal with parameterized base classes
+            annotated_base_class = base_class + "<?>" if base_class == "OFOxm" else base_class
+
+            factories[base_class] = OFFactory(package="%s.%s" % (prefix, package),
+                    name=base_class + "s", members=[], remove_prefix=remove_prefix, base_class=annotated_base_class, sub_factories={}, xid_generator= (base_class == "OFErrorMsg"))
+
+        factories[""] = OFFactory(
+                    package=prefix,
+                    name="OFFactory",
+                    remove_prefix="",
+                    members=[], base_class="OFMessage", sub_factories=OrderedDict(
+                        ("{}{}s".format(n[2].lower(), n[3:]), "{}s".format(n)) for n in sub_factory_classes ),
+                    xid_generator=True)
+
+        for i in self.interfaces:
+            for n, factory in factories.items():
+                if n == "":
+                    factory.members.append(i)
+                    break
+                else:
+                    super_class = self.interface_by_name(n)
+                    if i.is_instance_of(super_class):
+                        factory.members.append(i)
+                        break
+        return factories.values()
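
To make the string arithmetic above easier to follow, here is the derivation for one sub-factory base class, traced from the code (not taken from generated output):

    base_class = "OFMeterBand"
    package = base_class[2:].lower()                        # "meterband" -> ...protocol.meterband
    remove_prefix = base_class[2].lower() + base_class[3:]  # "meterBand", stripped from member names
    factory_name = base_class + "s"                         # "OFMeterBands"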
+
+    @memoize
+    def factory_of(self, interface):
+        for factory in self.of_factories:
+            if interface in factory.members:
+                return factory
+        return None
+
+    def generate_class(self, clazz):
+        """ return wether or not to generate implementation class clazz.
+            Now true for everything except OFTableModVer10.
+            @param clazz JavaOFClass instance
+        """
+        if clazz.interface.name.startswith("OFMatchV"):
+            return True
+        elif clazz.name == "OFTableModVer10":
+            # tablemod ver 10 is a hack and has no oftype defined
+            return False
+        if loxi_utils.class_is_message(clazz.interface.c_name):
+            return True
+        if loxi_utils.class_is_oxm(clazz.interface.c_name):
+            return True
+        if loxi_utils.class_is_action(clazz.interface.c_name):
+            return True
+        if loxi_utils.class_is_instruction(clazz.interface.c_name):
+            return True
+        else:
+            return True
+
+    @property
+    @memoize
+    def oxm_map(self):
+        OxmMapEntry = namedtuple("OxmMapEntry", ["type_name", "value", "masked" ])
+        return OrderedDict( (oxm.name, OxmMapEntry(type_name=oxm.member_by_name("value").java_type.public_type,
+                                       value=re.sub(r'^of_oxm_', r'', re.sub(r'_masked$', r'', oxm.ir_class.name)).upper(),
+                                       masked=oxm.ir_class.name.endswith("_masked")))
+                  for oxm in self.interfaces if oxm.ir_class.is_subclassof("of_oxm") )
+
+class OFFactory(namedtuple("OFFactory", ("package", "name", "members", "remove_prefix", "base_class", "sub_factories", "xid_generator"))):
+    @property
+    def factory_classes(self):
+            return [ OFFactoryClass(
+                    package="org.projectfloodlight.openflow.protocol.ver{}".format(version.dotless_version),
+                    name="{}Ver{}".format(self.name, version.dotless_version),
+                    interface=self,
+                    version=version
+                    ) for version in model.versions ]
+
+    def method_name(self, member, builder=True):
+        n = member.variable_name
+        if n.startswith(self.remove_prefix):
+            n = n[len(self.remove_prefix):]
+            n = n[0].lower() + n[1:]
+        if builder:
+            return "build" + n[0].upper() + n[1:]
+        else:
+            return n
+
+    def of_version(self, version):
+        for fc in self.factory_classes:
+            if fc.version == version:
+                return fc
+        return None
+
+OFGenericClass = namedtuple("OFGenericClass", ("package", "name"))
+class OFFactoryClass(namedtuple("OFFactoryClass", ("package", "name", "interface", "version"))):
+    @property
+    def base_class(self):
+        return self.interface.base_class
+
+    @property
+    def versioned_base_class(self):
+        base_class_interface = model.interface_by_name(self.interface.base_class)
+        if base_class_interface and base_class_interface.has_version(self.version):
+            return base_class_interface.versioned_class(self.version)
+        else:
+            return None
+
+model = JavaModel()
+
+#######################################################################
+### OFVersion
+#######################################################################
+
+class JavaOFVersion(object):
+    """ Models a version of OpenFlow. contains methods to convert the internal
+        Loxi version to a java constant / a string """
+    def __init__(self, ir_version):
+        assert isinstance(ir_version, OFVersion)
+        self.ir_version = ir_version
+        self.int_version = self.ir_version.wire_version
+
+    @property
+    def dotless_version(self):
+        return self.ir_version.version.replace(".", "")
+
+    @property
+    def constant_version(self):
+        return "OF_" + self.dotless_version
+
+    def __repr__(self):
+        return "JavaOFVersion(%d)" % self.int_version
+
+    def __str__(self):
+        return self.ir_version.version
+
+    def __hash__(self):
+        return hash(self.ir_version)
+
+    def __eq__(self, other):
+        if other is None or type(self) != type(other):
+            return False
+        return (self.ir_version,) == (other.ir_version,)
+
+#######################################################################
+### Interface
+#######################################################################
+
+class JavaOFInterface(object):
+    """ Models an OpenFlow Message class for the purpose of the java class.
+        Version agnostic, in contrast to the loxi_ir python model.
+    """
+    def __init__(self, ir_class):
+        """"
+        @param c_name: loxi style name (e.g., of_flow_add)
+        @param version_map map of { JavaOFVersion: OFClass (from loxi_ir) }
+        """
+        self.ir_class = ir_class
+        self.c_name = ir_class.name
+        self.version_map = { JavaOFVersion(v): c for v,c in ir_class.version_classes.items() }
+        # name: the Java Type name, e.g., OFFlowAdd
+        self.name = java_class_name(self.c_name)
+        # variable_name name to use for variables of this type. i.e., flowAdd
+        self.variable_name = self.name[2].lower() + self.name[3:]
+        self.title_name = self.variable_name[0].upper() + self.variable_name[1:]
+        # name for use in constants: FLOW_ADD
+        self.constant_name = self.c_name.upper().replace("OF_", "")
+
+        pck_suffix, parent_interface, self.type_annotation = self.class_info()
+        self.package = "org.projectfloodlight.openflow.protocol.%s" % pck_suffix if pck_suffix else "org.projectfloodlight.openflow.protocol"
+        if self.name != parent_interface:
+            self.parent_interface = parent_interface
+        else:
+            self.parent_interface = None
+
+    @property
+    @memoize
+    def all_parent_interfaces(self):
+        return [ "OFObject" ] + \
+               ([ self.parent_interface ] if self.parent_interface else [] )+ \
+               self.additional_parent_interfaces
+    @property
+    @memoize
+    def additional_parent_interfaces(self):
+        if loxi_utils.class_is_message(self.c_name) and not self.is_virtual:
+            m = re.match(r'(.*)Request$', self.name)
+            if m:
+                reply_name = m.group(1) + "Reply"
+                if model.interface_by_name(reply_name):
+                    return ["OFRequest<%s>" % reply_name ]
+        return []
+
+
+    def is_instance_of(self, other_class):
+        if self == other_class:
+            return True
+        parent = self.super_class
+        if parent is None:
+            return False
+        else:
+            return parent.is_instance_of(other_class)
+
+    @property
+    def super_class(self):
+        if not self.parent_interface:
+            return None
+        else:
+            return model.interface_by_name(self.parent_interface)
+
+
+    def inherited_declaration(self, type_spec="?"):
+        if self.type_annotation:
+            return "%s<%s>" % (self.name, type_spec)
+        else:
+            return "%s" % self.name
+
+    @property
+    def type_variable(self):
+        if self.type_annotation:
+            return "<T>"
+        else:
+            return "";
+
+    def class_info(self):
+        """ return tuple of (package_prefix, parent_class) for the current JavaOFInterface"""
+        # FIXME: This code could be cleaned up further. Maybe some of the exceptions
+        # here could be folded into ir, or the type arithmetic specified in a more general
+        # fashion
+        def calc_package(i):
+            if i.is_subclassof("of_error_msg"):
+                return "errormsg"
+            elif i.is_instanceof("of_action"):
+                return "action"
+            elif i.is_instanceof("of_action_id"):
+                return "actionid"
+            elif i.is_instanceof("of_instruction"):
+                return "instruction"
+            elif i.is_instanceof("of_instruction_id"):
+                return "instructionid"
+            elif i.is_instanceof("of_oxm"):
+                return "oxm"
+            elif i.is_instanceof("of_meter_band"):
+                return "meterband"
+            elif i.is_instanceof("of_queue_prop"):
+                return "queueprop"
+            elif i.is_instanceof("of_bsn_tlv"):
+                return "bsntlv"
+            else:
+                return ""
+
+        def calc_super_name(i):
+            if re.match('of_match_.*', i.name):
+                return "Match"
+            else:
+                ir_super_class = self.ir_class.superclass
+                return java_class_name(ir_super_class.name) if ir_super_class else ""
+
+        package = calc_package(self.ir_class)
+        super_name = calc_super_name(self.ir_class)
+
+        if self.name == "OFStatsRequest":
+            # stats_requests are special because of their type annotation
+            return (package, "OFMessage", "T extends OFStatsReply")
+        elif self.ir_class.is_subclassof('of_stats_request'):
+            # stats_request subclasses are special because of their type annotation
+            reply_name = re.sub(r'Request$', 'Reply', self.name)
+            super_type_annotation = "T" if self.ir_class.virtual else reply_name
+
+            type_annotation = "T extends {}".format(reply_name) if self.ir_class.virtual \
+                    else ""
+
+            return (package, "{}<{}>".format(super_name, super_type_annotation),
+                    type_annotation)
+        elif self.name == "OFOxm":
+            return (package, None, "T extends OFValueType<T>")
+        elif loxi_utils.class_is_oxm(self.c_name):
+            # look up type from member value for OFValueType type annotation
+            if self.member_by_name("value") is not None:
+                return (package, "OFOxm<%s>" % self.member_by_name("value").java_type.public_type, None)
+            else:
+                return (package, "OFOxm", None)
+        else:
+            return (package, super_name, None)
+
+    @property
+    @memoize
+    def writeable_members(self):
+        return [ m for m in self.members if m.is_writeable ]
+
+    @memoize
+    def member_by_name(self, name):
+        return find(lambda m: m.name == name, self.members)
+
+    @property
+    @memoize
+    def members(self):
+        return self.ir_model_members + self.virtual_members
+
+    @property
+    @memoize
+    def ir_model_members(self):
+        """return a list of all members to be exposed by this interface. Corresponds to
+           the union of the members of the vesioned classes without length, fieldlength
+           and pads (those are handled automatically during (de)serialization and not exposed"""
+        all_versions = []
+        member_map = collections.OrderedDict()
+
+        member_version_map = {}
+        for (version, of_class) in self.version_map.items():
+            for of_member in of_class.members:
+                if isinstance(of_member, OFLengthMember) or \
+                   isinstance(of_member, OFFieldLengthMember) or \
+                   isinstance(of_member, OFPadMember):
+                    continue
+                java_member = JavaMember.for_of_member(self, of_member)
+                if of_member.name not in member_map:
+                    member_map[of_member.name] = java_member
+                    member_version_map[of_member.name] = version
+                else:
+                    existing = member_map[of_member.name]
+
+                    if existing.java_type.public_type != java_member.java_type.public_type:
+                        raise Exception(
+                             "Error constructing interface {}: type signatures do not match up between versions.\n"
+                             " Member Name: {}\n"
+                             " Existing: Version={}, Java={}, IR={}\n"
+                             " New:      Version={}, Java={}, IR={}"
+                               .format(self.name, existing.name,
+                                   member_version_map[of_member.name], existing.java_type.public_type, existing.member.oftype,
+                                   version, java_member.java_type.public_type, java_member.member.oftype)
+                        )
+
+        return tuple(m for m in member_map.values() if m.name not in model.read_blacklist[self.name])
+
+    @property
+    def virtual_members(self):
+        virtual_members = []
+        if self.name == "OFOxm":
+            virtual_members += [
+                    JavaVirtualMember(self, "value", java_type.generic_t),
+                    JavaVirtualMember(self, "mask", java_type.generic_t),
+                    JavaVirtualMember(self, "matchField", java_type.make_match_field_jtype("T")),
+                    JavaVirtualMember(self, "masked", java_type.boolean),
+                    JavaVirtualMember(self, "canonical", java_type.make_oxm_jtype("T"))
+                   ]
+        elif self.ir_class.is_subclassof("of_oxm"):
+            value = find(lambda m: m.name=="value", self.ir_model_members)
+            if value:
+                field_type = java_type.make_match_field_jtype(value.java_type.public_type)
+            else:
+                field_type = java_type.make_match_field_jtype()
+
+            virtual_members += [
+                    JavaVirtualMember(self, "matchField", field_type),
+                    JavaVirtualMember(self, "masked", java_type.boolean),
+                    JavaVirtualMember(self, "canonical", java_type.make_oxm_jtype(value.java_type.public_type),
+                            custom_template=lambda builder: "OFOxm{}_getCanonical.java".format(".Builder" if builder else "")),
+                   ]
+            if not find(lambda x: x.name == "mask", self.ir_model_members):
+                virtual_members.append(
+                        JavaVirtualMember(self, "mask", find(lambda x: x.name == "value", self.ir_model_members).java_type))
+
+        if not find(lambda m: m.name == "version", self.ir_model_members):
+            virtual_members.append(JavaVirtualMember(self, "version", java_type.of_version))
+
+        return tuple(virtual_members)
+
+    @property
+    @memoize
+    def is_virtual(self):
+        """ Is this interface virtual. If so, do not generate a builder interface """
+        return self.name in model.virtual_interfaces or all(ir_class.virtual for ir_class in self.version_map.values())
+
+    @property
+    def is_universal(self):
+        """ Is this interface universal, i.e., does it exist in all OF versions? """
+        return len(self.all_versions) == len(model.versions)
+
+    @property
+    @memoize
+    def all_versions(self):
+        """ return list of all versions that this interface exists in """
+        return self.version_map.keys()
+
+    def has_version(self, version):
+        return version in self.version_map
+
+    def versioned_class(self, version):
+        return JavaOFClass(self, version, self.version_map[version])
+
+    @property
+    @memoize
+    def versioned_classes(self):
+            return [ self.versioned_class(version) for version in self.all_versions ]
+
+#######################################################################
+### (Versioned) Classes
+#######################################################################
+
+class JavaOFClass(object):
+    """ Models an OpenFlow Message class for the purpose of the java class.
+        Version specific child of a JavaOFInterface
+    """
+    def __init__(self, interface, version, ir_class):
+        """
+        @param interface JavaOFInterface instance of the parent interface
+        @param version JavaOFVersion
+        @param ir_class OFClass from loxi_ir
+        """
+        self.interface = interface
+        self.ir_class = ir_class
+        self.c_name = self.ir_class.name
+        self.version = version
+        self.constant_name = self.c_name.upper().replace("OF_", "")
+        self.package = "org.projectfloodlight.openflow.protocol.ver%s" % version.dotless_version
+        self.generated = False
+
+    @property
+    @memoize
+    def unit_test(self):
+        return JavaUnitTestSet(self)
+
+    @property
+    def name(self):
+        return "%sVer%s" % (self.interface.name, self.version.dotless_version)
+
+    @property
+    def variable_name(self):
+        return self.name[2].lower() + self.name[3:]
+
+    @property
+    def length(self):
+        if self.is_fixed_length:
+            return self.min_length
+        else:
+            raise Exception("No fixed length for class %s, version %s" % (self.name, self.version))
+
+    @property
+    def min_length(self):
+        """ @return the minimum wire length of an instance of this class in bytes """
+        return self.ir_class.base_length
+
+    @property
+    def is_fixed_length(self):
+        """ true iff this class serializes to a fixed length on the wire """
+        return self.ir_class.is_fixed_length and not self.is_virtual
+
+    def all_properties(self):
+        return self.interface.members
+
+    @property
+    @memoize
+    def data_members(self):
+        return [ prop for prop in self.members if prop.is_data ]
+
+    @property
+    @memoize
+    def fixed_value_members(self):
+        return [ prop for prop in self.members if prop.is_fixed_value ]
+
+    @property
+    @memoize
+    def public_members(self):
+        return [ prop for prop in self.members if prop.is_public ]
+
+    @property
+    @memoize
+    def members(self):
+        return self.ir_model_members + self.virtual_members
+
+    @property
+    @memoize
+    def ir_model_members(self):
+        members = [ JavaMember.for_of_member(self, of_member) for of_member in self.ir_class.members ]
+        return tuple(members)
+
+    @property
+    def virtual_members(self):
+        virtual_members = []
+        if self.ir_class.is_subclassof("of_oxm"):
+            value_member = find(lambda m: m.name == "value", self.ir_model_members)
+            if value_member:
+                oxm_entry = model.oxm_map[self.interface.name]
+                virtual_members += [
+                    JavaVirtualMember(self, "matchField", java_type.make_match_field_jtype(value_member.java_type.public_type), "MatchField.%s" % oxm_entry.value),
+                    JavaVirtualMember(self, "masked", java_type.boolean, "true" if oxm_entry.masked else "false"),
+                    ]
+            else:
+                virtual_members += [
+                    JavaVirtualMember(self, "matchField", java_type.make_match_field_jtype(), "null"),
+                    JavaVirtualMember(self, "masked", java_type.boolean, "false"),
+                 ]
+        if not find(lambda m: m.name == "version", self.ir_model_members):
+            virtual_members.append(JavaVirtualMember(self, "version", java_type.of_version, "OFVersion.%s" % self.version.constant_version))
+
+        return tuple(virtual_members)
+
+    @memoize
+    def member_by_name(self, name):
+        return find(lambda m: m.name == name, self.members)
+
+    def all_versions(self):
+        return [ JavaOFVersion(int_version)
+                 for int_version in of_g.unified[self.c_name]
+                 if int_version != 'union' and int_version != 'object_id' ]
+
+    def version_is_inherited(self, version):
+        return 'use_version' in of_g.unified[self.ir_class.name][version.int_version]
+
+    def inherited_from(self, version):
+        return JavaOFVersion(of_g.unified[self.ir_class.name][version.int_version]['use_version'])
+
+    @property
+    def is_virtual(self):
+        return self.ir_class.virtual # type_maps.class_is_virtual(self.c_name) or self.ir_class.virtual
+
+    @property
+    def discriminator(self):
+        return find(lambda m: isinstance(m, OFDiscriminatorMember), self.ir_class.members)
+
+    @property
+    def is_extension(self):
+        return type_maps.message_is_extension(self.c_name, -1)
+
+    @property
+    def align(self):
+        return int(self.ir_class.params['align']) if 'align' in self.ir_class.params else 0
+
+    @property
+    def length_includes_align(self):
+        return self.ir_class.params['length_includes_align'] == "True" if 'length_includes_align' in self.ir_class.params else False
+
+    @property
+    @memoize
+    def superclass(self):
+        return find(lambda c: c.version == self.version and c.c_name == self.ir_class.superclass, model.all_classes)
+
+    @property
+    @memoize
+    def subclasses(self):
+        return [ c for c in model.all_classes if c.version == self.version and c.ir_class.superclass
+                   and c.ir_class.superclass.name == self.c_name ]
+
+#######################################################################
+### Member
+#######################################################################
+
+
+class JavaMember(object):
+    """ Models a property (member) of an openflow class. """
+    def __init__(self, msg, name, java_type, member):
+        self.msg = msg
+        self.name = name
+        self.java_type = java_type
+        self.member = member
+        self.c_name = self.member.name if hasattr(self.member, "name") else ""
+
+    @property
+    def title_name(self):
+        return self.name[0].upper() + self.name[1:]
+
+    @property
+    def constant_name(self):
+        return self.c_name.upper()
+
+    @property
+    def getter_name(self):
+        return ("is" if self.java_type.public_type == "boolean" else "get") + self.title_name
+
+    @property
+    def setter_name(self):
+        return "set" + self.title_name
+
+    @property
+    def default_name(self):
+        if self.is_fixed_value:
+            return self.constant_name
+        else:
+            return "DEFAULT_"+self.constant_name
+
+    @property
+    def default_value(self):
+        if self.is_fixed_value:
+            return self.enum_value
+        else:
+            default = self.java_type.default_op(self.msg.version)
+            if default == "null" and not self.is_nullable:
+                return None
+            else:
+                return default
+
+    @property
+    def enum_value(self):
+        if self.name == "version":
+            return "OFVersion.%s" % self.msg.version.constant_version
+
+        java_type = self.java_type.public_type
+        try:
+            global model
+            enum = model.enum_by_name(java_type)
+            entry = enum.entry_by_version_value(self.msg.version, self.value)
+            return "%s.%s" % ( enum.name, entry.name)
+        except KeyError, e:
+            logger.debug("No enum found", e)
+            return self.value
+
+    @property
+    def is_pad(self):
+        return isinstance(self.member, OFPadMember)
+
+    def is_type_value(self, version=None):
+        if version is None:
+            return any(self.is_type_value(version) for version in self.msg.all_versions)
+        try:
+            return self.c_name in get_type_values(self.msg.c_name, version.int_version)
+        except Exception:
+            return False
+
+    @property
+    def is_field_length_value(self):
+        return isinstance(self.member, OFFieldLengthMember)
+
+    @property
+    def is_discriminator(self):
+        return isinstance(self.member, OFDiscriminatorMember)
+
+    @property
+    def is_length_value(self):
+        return isinstance(self.member, OFLengthMember)
+
+    @property
+    def is_public(self):
+        return not (self.is_pad or self.is_length_value)
+
+    @property
+    def is_data(self):
+        return isinstance(self.member, OFDataMember) and self.name != "version"
+
+    @property
+    def is_fixed_value(self):
+        return hasattr(self.member, "value") or self.name == "version" \
+                or ( self.name == "length" and self.msg.is_fixed_length) \
+                or ( self.name == "len" and self.msg.is_fixed_length)
+
+    @property
+    def value(self):
+        if self.name == "version":
+            return self.msg.version.int_version
+        elif self.name == "length" or self.name == "len":
+            return self.msg.length
+        else:
+            return self.java_type.format_value(self.member.value)
+
+    @property
+    def priv_value(self):
+        if self.name == "version":
+            return self.java_type.format_value(self.msg.version.int_version, pub_type=False)
+        elif self.name == "length" or self.name == "len":
+            return self.java_type.format_value(self.msg.length, pub_type=False)
+        else:
+            return self.java_type.format_value(self.member.value, pub_type=False)
+
+
+    @property
+    def is_writeable(self):
+        return self.is_data and not self.name in model.write_blacklist[self.msg.name]
+
+    def get_type_value_info(self, version):
+        return get_type_values(self.msg.c_name, version.int_version)[self.c_name]
+
+    @property
+    def length(self):
+        if hasattr(self.member, "length"):
+            return self.member.length
+        else:
+            count, base = loxi_utils.type_dec_to_count_base(self.member.type)
+            return of_g.of_base_types[base]['bytes'] * count
+
+    @staticmethod
+    def for_of_member(java_class, member):
+        if isinstance(member, OFPadMember):
+            return JavaMember(None, "", None, member)
+        else:
+            if member.name == 'len':
+                name = 'length'
+            elif member.name == 'value_mask':
+                name = 'mask'
+            elif member.name == 'group_id':
+                name = 'group'
+            else:
+                name = java_type.name_c_to_camel(member.name)
+            j_type = java_type.convert_to_jtype(java_class.c_name, member.name, member.oftype)
+            return JavaMember(java_class, name, j_type, member)
+
+    @property
+    def is_universal(self):
+        for version, ir_class in self.msg.ir_class.version_classes.items():
+            if not ir_class.member_by_name(self.member.name):
+                return False
+        return True
+
+    @property
+    def is_virtual(self):
+        return False
+
+    def __hash__(self):
+        return hash(self.name)
+
+    def __eq__(self, other):
+        if other is None or type(self) != type(other):
+            return False
+        return (self.name,) == (other.name,)
+
+    @property
+    def is_nullable(self):
+        return self.name in model.nullable_map[self.msg.name]
+
+
+class JavaVirtualMember(JavaMember):
+    """ Models a virtual property (member) of an openflow class that is not backed by a loxi ir member """
+    def __init__(self, msg, name, java_type, value=None, custom_template=None):
+        JavaMember.__init__(self, msg, name, java_type, member=None)
+        self._value = value
+        self.custom_template = custom_template
+
+    @property
+    def is_fixed_value(self):
+        return True
+
+    @property
+    def value(self):
+        return self._value
+
+    @property
+    def priv_value(self):
+        return self._value
+
+
+    @property
+    def is_universal(self):
+        return True
+
+    @property
+    def is_virtual(self):
+        return True
+
+#######################################################################
+### Unit Test
+#######################################################################
+
+class JavaUnitTestSet(object):
+    def __init__(self, java_class):
+        self.java_class = java_class
+        first_data_file_name = "of{version}/{name}.data".format(version=java_class.version.dotless_version,
+                                                     name=java_class.c_name[3:])
+        glob_file_name = "of{version}/{name}__*.data".format(version=java_class.version.dotless_version,
+                                                     name=java_class.c_name[3:])
+        test_class_name = self.java_class.name + "Test"
+        self.test_units = []
+        if test_data.exists(first_data_file_name):
+            self.test_units.append(JavaUnitTest(java_class, first_data_file_name, test_class_name))
+
+        i = 1
+        for f in test_data.glob(glob_file_name):
+            m = re.match(".*__(.*).data", f)
+            if m:
+                suffix = java_type.name_c_to_caps_camel(m.group(1))
+            else:
+                suffix = str(i)
+                i += 1
+            test_class_name = self.java_class.name + suffix + "Test"
+            self.test_units.append(JavaUnitTest(java_class, f, test_class_name))
+
+    @property
+    def package(self):
+        return self.java_class.package
+
+    @property
+    def has_test_data(self):
+        return len(self.test_units) > 0
+
+    @property
+    def length(self):
+        return len(self.test_units)
+
+    def get_test_unit(self, i):
+        return self.test_units[i]
+
+
+class JavaUnitTest(object):
+    def __init__(self, java_class, file_name=None, test_class_name=None):
+        self.java_class = java_class
+        if file_name is None:
+            self.data_file_name = "of{version}/{name}.data".format(version=java_class.version.dotless_version,
+                                                         name=java_class.c_name[3:])
+        else:
+            self.data_file_name = file_name
+        if test_class_name is None:
+            self.test_class_name = self.java_class.name + "Test"
+        else:
+            self.test_class_name = test_class_name
+
+    @property
+    def package(self):
+        return self.java_class.package
+
+    @property
+    def name(self):
+        return self.test_class_name
+
+    @property
+    def interface(self):
+        return self.java_class.interface
+
+    @property
+    def has_test_data(self):
+        return test_data.exists(self.data_file_name)
+
+    @property
+    @memoize
+    def test_data(self):
+        return test_data.read(self.data_file_name)
+
+
+#######################################################################
+### Enums
+#######################################################################
+
+class JavaEnum(object):
+    def __init__(self, c_name, version_enum_map):
+        self.c_name = c_name
+
+        self.name   = "OF" + java_type.name_c_to_caps_camel("_".join(c_name.split("_")[1:]))
+
+        # Port_features has constants that start with digits
+        self.name_prefix = "PF_" if self.name == "OFPortFeatures" else ""
+
+        self.version_enums = version_enum_map
+
+        entry_name_version_value_map = OrderedDefaultDict(lambda: OrderedDict())
+        for version, ir_enum in version_enum_map.items():
+            for ir_entry in ir_enum.entries:
+                entry_name_version_value_map[ir_entry.name][version] = ir_entry.value
+
+        self.entries = [ JavaEnumEntry(self, name, version_value_map)
+                         for (name, version_value_map) in entry_name_version_value_map.items() ]
+
+        self.entries = [ e for e in self.entries if e.name not in model.enum_entry_blacklist[self.name] ]
+        self.package = "org.projectfloodlight.openflow.protocol"
+
+        self.metadata = model.enum_metadata_map[self.name]
+
+    def wire_type(self, version):
+        ir_enum = self.version_enums[version]
+        if "wire_type" in ir_enum.params:
+            return java_type.convert_enum_wire_type_to_jtype(ir_enum.params["wire_type"])
+        else:
+            return java_type.u8
+
+    @property
+    @memoize
+    def is_bitmask(self):
+        return any(ir_enum.is_bitmask for ir_enum in self.version_enums.values())
+
+    @property
+    def versions(self):
+        return self.version_enums.keys()
+
+    @memoize
+    def entry_by_name(self, name):
+        res = find(lambda e: e.name == name, self.entries)
+        if res:
+            return res
+        else:
+            raise KeyError("Enum %s: no entry with name %s" % (self.name, name))
+
+    @memoize
+    def entry_by_c_name(self, name):
+        res = find(lambda e: e.c_name == name, self.entries)
+        if res:
+            return res
+        else:
+            raise KeyError("Enum %s: no entry with c_name %s" % (self.name, name))
+
+    @memoize
+    def entry_by_version_value(self, version, value):
+        res = find(lambda e: e.values[version] == value if version in e.values else False, self.entries)
+        if res:
+            return res
+        else:
+            raise KeyError("Enum %s: no entry with version %s, value %s" % (self.name, version, value))
+
+# values: map of JavaOFVersion -> value
+class JavaEnumEntry(object):
+    def __init__(self, enum, name, values):
+        self.enum = enum
+        self.name = enum.name_prefix + "_".join(name.split("_")[1:]).upper()
+        self.values = values
+
+    @property
+    def constructor_params(self):
+        return [ m.value(self) for m in self.enum.metadata.properties ]
+
+    def has_value(self, version):
+        return version in self.values
+
+    def value(self, version):
+        return self.values[version]
+
+    def format_value(self, version):
+        res = self.enum.wire_type(version).format_value(self.values[version])
+        return res
+
+    def all_values(self, versions, not_present=None):
+        return [ self.values[version] if version in self.values else not_present for version in versions ]
+
+    @property
+    @memoize
+    def masked_enum_group(self):
+        group = find(lambda g: self.name in g.members, model.masked_enum_groups[self.enum.name])
+        return group
+
+    @property
+    @memoize
+    def is_mask(self):
+        return any(self.name == g.mask for g in model.masked_enum_groups[self.enum.name])
diff --git a/java_gen/java_type.py b/java_gen/java_type.py
new file mode 100644
index 0000000..e57000f
--- /dev/null
+++ b/java_gen/java_type.py
@@ -0,0 +1,730 @@
+import errno
+import os
+import re
+import subprocess
+import time
+
+import loxi_globals
+from generic_utils import memoize
+import loxi_utils.loxi_utils as loxi_utils
+
+def erase_type_annotation(class_name):
+    m=re.match(r'(.*)<.*>', class_name)
+    if m:
+        return m.group(1)
+    else:
+        return class_name
+
+def name_c_to_camel(name):
+    """ 'of_stats_reply' -> 'ofStatsReply' """
+    name = re.sub(r'^_','', name)
+    tokens = name.split('_')
+    for i in range(1, len(tokens)):
+            tokens[i] = tokens[i].title()
+    return "".join(tokens)
+
+def name_c_to_caps_camel(name):
+    """ 'of_stats_reply' to 'OFStatsReply' """
+    camel = name_c_to_camel(name.title())
+    if camel.startswith('Ofp'):
+        return camel.replace('Ofp','OF',1)
+    elif camel.startswith('Of'):
+        return camel.replace('Of','OF',1)
+    else:
+        return camel
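
A few concrete conversions produced by the two helpers above, written as plain assertions (they assume this module is importable as java_gen.java_type):

    from java_gen.java_type import name_c_to_camel, name_c_to_caps_camel

    assert name_c_to_camel("of_stats_reply") == "ofStatsReply"
    assert name_c_to_caps_camel("of_flow_add") == "OFFlowAdd"
    assert name_c_to_caps_camel("ofp_port_mod") == "OFPortMod"   # the 'Ofp' prefix collapses to 'OF'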
+
+java_primitive_types = set("boolean byte char short int long".split(" "))
+
+### info table about java primitive types, for casting literals in the source code
+# { name : (signed?, length_in_bits, cast_needed?) }
+java_primitives_info = {
+        'boolean' : (False, 8, False),
+        'byte' : (True, 8, True),
+        'char' : (False, 16, True),
+        'short' : (True, 16, True),
+        'int' : (True, 32, False),
+        'long' : (True, 64, False),
+}
+
+def format_primitive_literal(t, value):
+    """ Format a primitive numeric literal for inclusion in the
+        java source code. Takes care of casting the literal
+        appropriately for correct representation despite Java's
+        signed-craziness
+    """
+    signed, bits, cast_needed = java_primitives_info[t]
+    max = (1 << bits)-1
+    if value > max:
+        raise Exception("Value %d to large for type %s" % (value, t))
+
+    if signed:
+        max_pos = (1 << (bits-1)) - 1
+
+        if value > max_pos:
+            if t == "long":
+                return str(value - (1 << bits)) + "L"
+            else:
+                return "(%s) 0x%x" % (t, value)
+    return "%s0x%x%s" % ("(%s) " % t if cast_needed else "", value, "L" if t=="long" else "")
+
+
+ANY = 0xFFFFFFFFFFFFFFFF
+
+class VersionOp:
+    def __init__(self, version=ANY, read=None, write=None, default=None, funnel=None):
+        self.version = version
+        self.read = read
+        self.write = write
+        self.default = default
+        self.funnel = funnel
+
+    def __str__(self):
+        return "[Version: %d, Read: '%s', Write: '%s', Default: '%s', Funnel: '%s' ]" % (self.version, self.read, self.write, self.default, self.funnel )
+
+### FIXME: This class should really be cleaned up
+class JType(object):
+    """ Wrapper class to hold C to Java type conversion information. JTypes can have a 'public'
+        and or 'private' java type associated with them and can define how those types can be
+        read from and written to ChannelBuffers.
+
+    """
+    def __init__(self, pub_type, priv_type=None):
+        self.pub_type = pub_type    # the type we expose externally, e.g. 'U8'
+        if priv_type is None:
+            priv_type = pub_type
+        self.priv_type = priv_type  # the internal storage type
+        self.ops = {}
+
+    def set_priv_type(self, priv_type):
+        self.priv_type = priv_type
+        return self
+
+    def op(self, version=ANY, read=None, write=None, default=None, funnel=None, pub_type=ANY):
+        """
+        define operations to be performed for reading and writing this type
+        (when read_op/write_op is called). The operations 'read' and 'write'
+        can either be strings (in which $name, $version and $length will be replaced),
+        or callables (which are invoked with keyword arguments such as name, version, length).
+
+        @param version int      OF version to define operation for, or ANY for all
+        @param pub_type boolean whether to define operations for the public type (True), the
+                                private type (False) or both (ANY)
+        @param read read expression (either string or callable)
+        @param write write expression (either string or callable)
+        """
+
+        pub_types = [ pub_type ] if pub_type is not ANY else [ False, True ]
+        for pub_type in pub_types:
+            self.ops[(version, pub_type)] = VersionOp(version, read, write, default, funnel)
+        return self
+
+    def format_value(self, value, pub_type=True):
+        # Format a constant value of this type, for inclusion in the java source code
+        # For primitive types, takes care of casting the value appropriately, to
+        # cope with java's signedness limitation
+        t = self.pub_type if pub_type else self.priv_type
+        if t in java_primitive_types:
+            return format_primitive_literal(t, value)
+        else:
+            return value
+
+    @property
+    def public_type(self):
+        """ return the public type """
+        return self.pub_type
+
+    def priv(self):
+        """ return the private type """
+        return self.priv_type
+
+    def has_priv(self):
+        """ Is the private type different from the public one?"""
+        return self.pub_type != self.priv_type
+
+    def get_op(self, op_type, version, pub_type, default_value, arguments):
+        ver = ANY if version is None else version.int_version
+
+        if not "version" in arguments:
+            arguments["version"] = version.dotless_version
+
+        def lookup(ver, pub_type):
+            if (ver, pub_type) in self.ops:
+                return getattr(self.ops[(ver, pub_type)], op_type)
+            else:
+                return None
+
+        _op = lookup(ver, pub_type) or lookup(ANY, pub_type) or default_value
+        if callable(_op):
+            return _op(**arguments)
+        else:
+            return reduce(lambda a,repl: a.replace("$%s" % repl[0], str(repl[1])),  arguments.items(), _op)
+
+    def read_op(self, version=None, length=None, pub_type=True):
+        """ return a Java stanza that reads a value of this JType from ChannelBuffer bb.
+        @param version int - OF wire version to generate expression for
+        @param pub_type boolean use this JType's 'public' (True) or 'private' (False) representation
+        @param length string, for operations that need it (e.g., read a list of unknown length):
+               Java expression evaluating to the byte length to be read. Defaults to the remaining
+               length of the message.
+        @return string containing generated Java expression.
+        """
+        if length is None:
+             # assumes that
+             # (1) length of the message has been read to 'length'
+             # (2) readerIndex at the start of the message has been stored in 'start'
+            length = "length - (bb.readerIndex() - start)"
+
+        return self.get_op("read", version, pub_type,
+            default_value='ChannelUtilsVer$version.read%s(bb)' % self.pub_type,
+            arguments=dict(length=length)
+            )
+
+    def write_op(self, version=None, name=None, pub_type=True):
+        """ return a Java stanza that writes a value of this JType contained in Java expression
+        'name' to ChannelBuffer bb.
+        @param name string containing a Java expression that evaluates to the value to be written
+        @param version int - OF wire version to generate expression for
+        @param pub_type boolean use this JType's 'public' (True) or 'private' (False) representation
+        @return string containing generated Java expression.
+        """
+
+        return self.get_op("write", version, pub_type,
+            default_value='ChannelUtilsVer$version.write%s(bb, $name)' % self.pub_type,
+            arguments=dict(name=name)
+            )
+
+
+    def default_op(self, version=None, pub_type=True):
+        """ return a Java stanza that returns a default value of this JType.
+        @param version JavaOFVersion
+        @return string containing generated Java expression.
+        """
+        return self.get_op("default", version, pub_type,
+            arguments = dict(),
+            default_value = self.format_value(0) if self.is_primitive else "null"
+        )
+
+    def skip_op(self, version=None, length=None):
+        """ return a java stanza that skips an instance of JType in the input ChannelBuffer 'bb'.
+            This is used in the Reader implementations for virtual classes (because after the
+            discriminator field, the concrete Reader instance will re-read all the fields)
+            Currently just delegates to read_op + throws away the result."""
+        return self.read_op(version, length)
+
+    def funnel_op(self, version=None, name=None, pub_type=True):
+        t = self.pub_type if pub_type else self.priv_type
+        return self.get_op("funnel", version, pub_type,
+            arguments = dict(name=name),
+            default_value =  '$name.putTo(sink)' if not self._is_primitive(pub_type) else "sink.put{}($name)".format(t[0].upper() + t[1:])
+        )
+
+    @property
+    def is_primitive(self):
+        return self._is_primitive()
+
+    def _is_primitive(self, pub_type=True):
+        """ return true if the pub_type is a java primitive type (and thus needs
+        special treatment, because it doesn't have methods)"""
+        t = self.pub_type if pub_type else self.priv_type
+        return t in java_primitive_types
+
+    @property
+    def is_array(self):
+        return self._is_array()
+
+    def _is_array(self, pub_type=True):
+        t = self.pub_type if pub_type else self.priv_type
+        return t.endswith("[]")
+
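+# Usage sketch (illustrative only; assumes a JavaOFVersion whose dotless_version is "13"):
+# for the predefined 'u16' mapping below,
+#     u16.read_op(version)                                  -> 'U16.f(bb.readShort())'
+#     u16.write_op(version, name='port')                    -> 'bb.writeShort(U16.t(port))'
+#     u16.write_op(version, name='port', pub_type=False)    -> 'bb.writeShort(port)'
+# the $name/$version/$length placeholders in registered op strings are substituted in get_op().
+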
+# Create a default mapping for an enum type. Bitmask enums map to Set<${java_name}>, plain enums to ${java_name}.
+def gen_enum_jtype(java_name, is_bitmask=False):
+    if is_bitmask:
+        java_type = "Set<{}>".format(java_name)
+        default_value = "ImmutableSet.<{}>of()".format(java_name)
+    else:
+        java_type = java_name
+        default_value = "null"
+
+    serializer = "{}SerializerVer$version".format(java_name)
+
+    return JType(java_type)\
+            .op(read="{}.readFrom(bb)".format(serializer),
+                write="{}.writeTo(bb, $name)".format(serializer),
+                default=default_value,
+                funnel="{}.putTo($name, sink)".format(serializer)
+               )
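+# Example (illustrative, assuming a bitmask enum rendered as 'OFPortState'):
+# gen_enum_jtype("OFPortState", is_bitmask=True) maps to the Java type Set<OFPortState>, with
+# read op 'OFPortStateSerializerVer$version.readFrom(bb)' and default 'ImmutableSet.<OFPortState>of()'.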
+
+def gen_list_jtype(java_base_name):
+    # read op assumes the class has a public final static field READER that implements
+    # OFMessageReader<$class> i.e., can deserialize an instance of class from a ChannelBuffer
+    # write op assumes class implements Writeable
+    return JType("List<{}>".format(java_base_name)) \
+        .op(
+            read= 'ChannelUtils.readList(bb, $length, {}Ver$version.READER)'.format(java_base_name), \
+            write='ChannelUtils.writeList(bb, $name)',
+            default="ImmutableList.<{}>of()".format(java_base_name),
+            funnel='FunnelUtils.putList($name, sink)'
+            )
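+# Example (illustrative): gen_list_jtype("OFAction") yields JType("List<OFAction>") whose read op,
+# rendered for $version = 13 with the default $length expression, becomes
+#     'ChannelUtils.readList(bb, length - (bb.readerIndex() - start), OFActionVer13.READER)'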
+
+def gen_fixed_length_string_jtype(length):
+    return JType('String').op(
+              read='ChannelUtils.readFixedLengthString(bb, {})'.format(length),
+              write='ChannelUtils.writeFixedLengthString(bb, $name, {})'.format(length),
+              default='""',
+              funnel='sink.putUnencodedChars($name)'
+            )
+
+##### Predefined JType mappings
+# FIXME: This list needs to be pruned / cleaned up. Most of these are schematic.
+
+u8 =  JType('short', 'byte') \
+        .op(read='U8.f(bb.readByte())', write='bb.writeByte(U8.t($name))', pub_type=True) \
+        .op(read='bb.readByte()', write='bb.writeByte($name)', pub_type=False)
+u8_list =  JType('List<U8>') \
+        .op(read='ChannelUtils.readList(bb, $length, U8.READER)',
+            write='ChannelUtils.writeList(bb, $name)',
+            default='ImmutableList.<U8>of()',
+            funnel='FunnelUtils.putList($name, sink)'
+           )
+u16 = JType('int', 'short') \
+        .op(read='U16.f(bb.readShort())', write='bb.writeShort(U16.t($name))', pub_type=True) \
+        .op(read='bb.readShort()', write='bb.writeShort($name)', pub_type=False)
+u32 = JType('long', 'int') \
+        .op(read='U32.f(bb.readInt())', write='bb.writeInt(U32.t($name))', pub_type=True) \
+        .op(read='bb.readInt()', write='bb.writeInt($name)', pub_type=False)
+u32_list = JType('List<U32>', 'int[]') \
+        .op(
+                read='ChannelUtils.readList(bb, $length, U32.READER)',
+                write='ChannelUtils.writeList(bb, $name)',
+                default="ImmutableList.<U32>of()",
+                funnel="FunnelUtils.putList($name, sink)")
+u64_list = JType('List<U64>', 'int[]') \
+        .op(
+                read='ChannelUtils.readList(bb, $length, U64.READER)',
+                write='ChannelUtils.writeList(bb, $name)',
+                default="ImmutableList.<U64>of()",
+                funnel="FunnelUtils.putList($name, sink)")
+u8obj = JType('U8', 'U8') \
+        .op(read='U8.of(bb.readByte())', write='bb.writeByte($name.getRaw())', default="U8.ZERO")
+u32obj = JType('U32', 'U32') \
+        .op(read='U32.of(bb.readInt())', write='bb.writeInt($name.getRaw())', default="U32.ZERO")
+u64 = JType('U64', 'long') \
+        .op(read='U64.ofRaw(bb.readLong())', write='bb.writeLong($name.getValue())', default="U64.ZERO", pub_type=True) \
+        .op(read='bb.readLong()', write='bb.writeLong($name)', pub_type=False)
+of_port = JType("OFPort") \
+         .op(version=1, read="OFPort.read2Bytes(bb)", write="$name.write2Bytes(bb)", default="OFPort.ANY") \
+         .op(version=ANY, read="OFPort.read4Bytes(bb)", write="$name.write4Bytes(bb)", default="OFPort.ANY")
+# the same OFPort, but with a default value of ZERO, only for OF10 match
+of_port_match_v1 = JType("OFPort") \
+         .op(version=1, read="OFPort.read2Bytes(bb)", write="$name.write2Bytes(bb)", default="OFPort.ZERO")
+actions_list = gen_list_jtype("OFAction")
+instructions_list = gen_list_jtype("OFInstruction")
+buckets_list = gen_list_jtype("OFBucket")
+port_desc_list = gen_list_jtype("OFPortDesc")
+packet_queue_list = gen_list_jtype("OFPacketQueue")
+port_desc = JType('OFPortDesc') \
+        .op(read='OFPortDescVer$version.READER.readFrom(bb)', \
+            write='$name.writeTo(bb)')
+octets = JType('byte[]')\
+        .op(read='ChannelUtils.readBytes(bb, $length)', \
+            write='bb.writeBytes($name)', \
+            default="new byte[0]",
+            funnel="sink.putBytes($name)"
+            );
+of_match = JType('Match') \
+        .op(read='ChannelUtilsVer$version.readOFMatch(bb)', \
+            write='$name.writeTo(bb)',
+            default="OFFactoryVer$version.MATCH_WILDCARD_ALL");
+group_mod_cmd = JType('OFGroupModCommand', 'short') \
+        .op(version=ANY, read="bb.readShort()", write="bb.writeShort($name)")
+flow_mod_cmd = JType('OFFlowModCommand', 'short') \
+        .op(version=1, read="bb.readShort()", write="bb.writeShort($name)") \
+        .op(version=ANY, read="bb.readByte()", write="bb.writeByte($name)")
+mac_addr = JType('MacAddress') \
+        .op(read="MacAddress.read6Bytes(bb)", \
+            write="$name.write6Bytes(bb)",
+            default="MacAddress.NONE")
+
+port_name = gen_fixed_length_string_jtype(16)
+desc_str = gen_fixed_length_string_jtype(256)
+serial_num = gen_fixed_length_string_jtype(32)
+table_name = gen_fixed_length_string_jtype(32)
+ipv4 = JType("IPv4Address") \
+        .op(read="IPv4Address.read4Bytes(bb)", \
+            write="$name.write4Bytes(bb)",
+            default='IPv4Address.NONE')
+ipv6 = JType("IPv6Address") \
+        .op(read="IPv6Address.read16Bytes(bb)", \
+            write="$name.write16Bytes(bb)",
+            default='IPv6Address.NONE')
+packetin_reason = gen_enum_jtype("OFPacketInReason")
+transport_port = JType("TransportPort")\
+        .op(read="TransportPort.read2Bytes(bb)",
+            write="$name.write2Bytes(bb)",
+            default="TransportPort.NONE")
+eth_type = JType("EthType")\
+        .op(read="EthType.read2Bytes(bb)",
+            write="$name.write2Bytes(bb)",
+            default="EthType.NONE")
+vlan_vid = JType("VlanVid")\
+        .op(version=ANY, read="VlanVid.read2Bytes(bb)", write="$name.write2Bytes(bb)", default="VlanVid.ZERO")
+vlan_vid_match = JType("OFVlanVidMatch")\
+        .op(version=1, read="OFVlanVidMatch.read2BytesOF10(bb)", write="$name.write2BytesOF10(bb)", default="OFVlanVidMatch.NONE") \
+        .op(version=2, read="OFVlanVidMatch.read2BytesOF10(bb)", write="$name.write2BytesOF10(bb)", default="OFVlanVidMatch.NONE") \
+        .op(version=ANY, read="OFVlanVidMatch.read2Bytes(bb)", write="$name.write2Bytes(bb)", default="OFVlanVidMatch.NONE")
+vlan_pcp = JType("VlanPcp")\
+        .op(read="VlanPcp.readByte(bb)",
+            write="$name.writeByte(bb)",
+            default="VlanPcp.NONE")
+ip_dscp = JType("IpDscp")\
+        .op(read="IpDscp.readByte(bb)",
+            write="$name.writeByte(bb)",
+            default="IpDscp.NONE")
+ip_ecn = JType("IpEcn")\
+        .op(read="IpEcn.readByte(bb)",
+            write="$name.writeByte(bb)",
+            default="IpEcn.NONE")
+ip_proto = JType("IpProtocol")\
+        .op(read="IpProtocol.readByte(bb)",
+            write="$name.writeByte(bb)",
+            default="IpProtocol.NONE")
+icmpv4_type = JType("ICMPv4Type")\
+        .op(read="ICMPv4Type.readByte(bb)",
+            write="$name.writeByte(bb)",
+            default="ICMPv4Type.NONE")
+icmpv4_code = JType("ICMPv4Code")\
+        .op(read="ICMPv4Code.readByte(bb)",
+            write="$name.writeByte(bb)",
+            default="ICMPv4Code.NONE")
+arp_op = JType("ArpOpcode")\
+        .op(read="ArpOpcode.read2Bytes(bb)",
+            write="$name.write2Bytes(bb)",
+            default="ArpOpcode.NONE")
+ipv6_flabel = JType("IPv6FlowLabel")\
+        .op(read="IPv6FlowLabel.read4Bytes(bb)",
+            write="$name.write4Bytes(bb)",
+            default="IPv6FlowLabel.NONE")
+metadata = JType("OFMetadata")\
+        .op(read="OFMetadata.read8Bytes(bb)",
+            write="$name.write8Bytes(bb)",
+            default="OFMetadata.NONE")
+oxm = JType("OFOxm<?>")\
+        .op(  read="OFOxmVer$version.READER.readFrom(bb)",
+              write="$name.writeTo(bb)")
+oxm_list = JType("OFOxmList") \
+        .op(
+            read= 'OFOxmList.readFrom(bb, $length, OFOxmVer$version.READER)', \
+            write='$name.writeTo(bb)',
+            default="OFOxmList.EMPTY")
+meter_features = JType("OFMeterFeatures")\
+        .op(read="OFMeterFeaturesVer$version.READER.readFrom(bb)",
+            write="$name.writeTo(bb)")
+bsn_vport_q_in_q = JType("OFBsnVportQInQ")\
+        .op(read="OFBsnVportQInQVer$version.READER.readFrom(bb)",
+            write="$name.writeTo(bb)")
+flow_wildcards = JType("int") \
+        .op(read='bb.readInt()',
+            write='bb.writeInt($name)',
+            default="OFFlowWildcardsSerializerVer$version.ALL_VAL")
+table_stats_wildcards = JType("int") \
+        .op(read='bb.readInt()',
+            write='bb.writeInt($name)')
+port_bitmap = JType('OFBitMask128') \
+            .op(read='OFBitMask128.read16Bytes(bb)',
+                write='$name.write16Bytes(bb)',
+                default='OFBitMask128.NONE')
+table_id = JType("TableId") \
+        .op(read='TableId.readByte(bb)',
+            write='$name.writeByte(bb)',
+            default='TableId.ALL')
+table_id_default_zero = JType("TableId") \
+        .op(read='TableId.readByte(bb)',
+            write='$name.writeByte(bb)',
+            default='TableId.ZERO')
+of_aux_id = JType("OFAuxId") \
+        .op(read='OFAuxId.readByte(bb)',
+            write='$name.writeByte(bb)',
+            default='OFAuxId.MAIN')
+of_version = JType("OFVersion", 'byte') \
+            .op(read='bb.readByte()', write='bb.writeByte($name)')
+
+port_speed = JType("PortSpeed")
+error_type = JType("OFErrorType")
+of_type = JType("OFType", 'byte') \
+            .op(read='bb.readByte()', write='bb.writeByte($name)')
+action_type= gen_enum_jtype("OFActionType")\
+               .set_priv_type("short")\
+               .op(read='bb.readShort()', write='bb.writeShort($name)', pub_type=False)
+instruction_type = gen_enum_jtype("OFInstructionType")\
+               .set_priv_type('short') \
+               .op(read='bb.readShort()', write='bb.writeShort($name)', pub_type=False)
+buffer_id = JType("OFBufferId") \
+            .op(read="OFBufferId.of(bb.readInt())", write="bb.writeInt($name.getInt())", default="OFBufferId.NO_BUFFER")
+boolean = JType("boolean", "byte") \
+        .op(read='(bb.readByte() != 0)',
+            write='bb.writeByte($name ? 1 : 0)',
+            default="false")
+datapath_id = JType("DatapathId") \
+        .op(read='DatapathId.of(bb.readLong())',
+            write='bb.writeLong($name.getLong())',
+            default='DatapathId.NONE')
+action_type_set = JType("Set<OFActionType>") \
+        .op(read='ChannelUtilsVer10.readSupportedActions(bb)',
+            write='ChannelUtilsVer10.writeSupportedActions(bb, $name)',
+            default='ImmutableSet.<OFActionType>of()',
+            funnel='ChannelUtilsVer10.putSupportedActionsTo($name, sink)')
+of_group = JType("OFGroup") \
+         .op(version=ANY, read="OFGroup.read4Bytes(bb)", write="$name.write4Bytes(bb)", default="OFGroup.ALL")
+# used e.g. for the out_group field of of_flow_stats_request, which has a special default value
+of_group_default_any = JType("OFGroup") \
+         .op(version=ANY, read="OFGroup.read4Bytes(bb)", write="$name.write4Bytes(bb)", default="OFGroup.ANY")
+lag_id = JType("LagId") \
+         .op(version=ANY, read="LagId.read4Bytes(bb)", write="$name.write4Bytes(bb)", default="LagId.NONE")
+vrf = JType("VRF") \
+         .op(version=ANY, read="VRF.read4Bytes(bb)", write="$name.write4Bytes(bb)", default="VRF.ZERO")
+class_id = JType("ClassId") \
+         .op(version=ANY, read="ClassId.read4Bytes(bb)", write="$name.write4Bytes(bb)", default="ClassId.NONE")
+boolean_value = JType('OFBooleanValue', 'OFBooleanValue') \
+        .op(read='OFBooleanValue.of(bb.readByte() != 0)', write='bb.writeByte($name.getInt())', default="OFBooleanValue.FALSE")
+checksum = JType("OFChecksum128") \
+        .op(read='OFChecksum128.read16Bytes(bb)',
+            write='$name.write16Bytes(bb)',
+            default='OFChecksum128.ZERO')
+gen_table_id = JType("GenTableId") \
+        .op(read='GenTableId.read2Bytes(bb)',
+            write='$name.write2Bytes(bb)',
+           )
+
+generic_t = JType("T")
+
+
+default_mtype_to_jtype_convert_map = {
+        'uint8_t' : u8,
+        'uint16_t' : u16,
+        'uint32_t' : u32,
+        'uint64_t' : u64,
+        'of_port_no_t' : of_port,
+        'list(of_action_t)' : actions_list,
+        'list(of_instruction_t)' : instructions_list,
+        'list(of_bucket_t)': buckets_list,
+        'list(of_port_desc_t)' : port_desc_list,
+        'list(of_packet_queue_t)' : packet_queue_list,
+        'list(of_uint64_t)' : u64_list,
+        'list(of_uint32_t)' : u32_list,
+        'list(of_uint8_t)' : u8_list,
+        'list(of_oxm_t)' : oxm_list,
+        'of_octets_t' : octets,
+        'of_match_t': of_match,
+        'of_fm_cmd_t': flow_mod_cmd,
+        'of_mac_addr_t': mac_addr,
+        'of_port_desc_t': port_desc,
+        'of_desc_str_t': desc_str,
+        'of_serial_num_t': serial_num,
+        'of_port_name_t': port_name,
+        'of_table_name_t': table_name,
+        'of_ipv4_t': ipv4,
+        'of_ipv6_t': ipv6,
+        'of_wc_bmap_t': flow_wildcards,
+        'of_oxm_t': oxm,
+        'of_meter_features_t': meter_features,
+        'of_bitmap_128_t': port_bitmap,
+        'of_checksum_128_t': checksum,
+        'of_bsn_vport_q_in_q_t': bsn_vport_q_in_q,
+        }
+
+## Map that defines exceptions from the standard loxi->java mapping scheme
+# map of {<loxi_class_name> : { <loxi_member_name> : <JType instance> } }
+exceptions = {
+        'of_packet_in': { 'data' : octets, 'reason': packetin_reason },
+        'of_oxm_tcp_src' : { 'value' : transport_port },
+        'of_oxm_tcp_src_masked' : { 'value' : transport_port, 'value_mask' : transport_port },
+        'of_oxm_tcp_dst' : { 'value' : transport_port },
+        'of_oxm_tcp_dst_masked' : { 'value' : transport_port, 'value_mask' : transport_port },
+        'of_oxm_udp_src' : { 'value' : transport_port },
+        'of_oxm_udp_src_masked' : { 'value' : transport_port, 'value_mask' : transport_port },
+        'of_oxm_udp_dst' : { 'value' : transport_port },
+        'of_oxm_udp_dst_masked' : { 'value' : transport_port, 'value_mask' : transport_port },
+        'of_oxm_sctp_src' : { 'value' : transport_port },
+        'of_oxm_sctp_src_masked' : { 'value' : transport_port, 'value_mask' : transport_port },
+        'of_oxm_sctp_dst' : { 'value' : transport_port },
+        'of_oxm_sctp_dst_masked' : { 'value' : transport_port, 'value_mask' : transport_port },
+        'of_oxm_eth_type' : { 'value' : eth_type },
+        'of_oxm_eth_type_masked' : { 'value' : eth_type, 'value_mask' : eth_type },
+        'of_oxm_vlan_vid' : { 'value' : vlan_vid_match },
+        'of_oxm_vlan_vid_masked' : { 'value' : vlan_vid_match, 'value_mask' : vlan_vid_match },
+        'of_oxm_vlan_pcp' : { 'value' : vlan_pcp },
+        'of_oxm_vlan_pcp_masked' : { 'value' : vlan_pcp, 'value_mask' : vlan_pcp },
+        'of_oxm_ip_dscp' : { 'value' : ip_dscp },
+        'of_oxm_ip_dscp_masked' : { 'value' : ip_dscp, 'value_mask' : ip_dscp },
+        'of_oxm_ip_ecn' : { 'value' : ip_ecn },
+        'of_oxm_ip_ecn_masked' : { 'value' : ip_ecn, 'value_mask' : ip_ecn },
+        'of_oxm_ip_proto' : { 'value' : ip_proto },
+        'of_oxm_ip_proto_masked' : { 'value' : ip_proto, 'value_mask' : ip_proto },
+        'of_oxm_icmpv4_type' : { 'value' : icmpv4_type },
+        'of_oxm_icmpv4_type_masked' : { 'value' : icmpv4_type, 'value_mask' : icmpv4_type },
+        'of_oxm_icmpv4_code' : { 'value' : icmpv4_code },
+        'of_oxm_icmpv4_code_masked' : { 'value' : icmpv4_code, 'value_mask' : icmpv4_code },
+        'of_oxm_arp_op' : { 'value' : arp_op },
+        'of_oxm_arp_op_masked' : { 'value' : arp_op, 'value_mask' : arp_op },
+        'of_oxm_arp_spa' : { 'value' : ipv4 },
+        'of_oxm_arp_spa_masked' : { 'value' : ipv4, 'value_mask' : ipv4 },
+        'of_oxm_arp_tpa' : { 'value' : ipv4 },
+        'of_oxm_arp_tpa_masked' : { 'value' : ipv4, 'value_mask' : ipv4 },
+        'of_oxm_ipv6_flabel' : { 'value' : ipv6_flabel },
+        'of_oxm_ipv6_flabel_masked' : { 'value' : ipv6_flabel, 'value_mask' : ipv6_flabel },
+        'of_oxm_metadata' : { 'value' : metadata },
+        'of_oxm_metadata_masked' : { 'value' : metadata, 'value_mask' : metadata },
+
+        'of_oxm_icmpv6_code' : { 'value' : u8obj },
+        'of_oxm_icmpv6_code_masked' : { 'value' : u8obj, 'value_mask' : u8obj },
+        'of_oxm_icmpv6_type' : { 'value' : u8obj },
+        'of_oxm_icmpv6_type_masked' : { 'value' : u8obj, 'value_mask' : u8obj },
+        'of_oxm_mpls_label' : { 'value' : u32obj },
+        'of_oxm_mpls_label_masked' : { 'value' : u32obj, 'value_mask' : u32obj },
+        'of_oxm_mpls_tc' : { 'value' : u8obj },
+        'of_oxm_mpls_tc_masked' : { 'value' : u8obj, 'value_mask' : u8obj },
+
+        'of_oxm_bsn_in_ports_128' : { 'value': port_bitmap },
+        'of_oxm_bsn_in_ports_128_masked' : { 'value': port_bitmap, 'value_mask': port_bitmap },
+
+        'of_oxm_bsn_lag_id' : { 'value' : lag_id },
+        'of_oxm_bsn_lag_id_masked' : { 'value' : lag_id, 'value_mask' : lag_id },
+
+        'of_oxm_bsn_vrf' : { 'value' : vrf },
+        'of_oxm_bsn_vrf_masked' : { 'value' : vrf, 'value_mask' : vrf },
+
+        'of_oxm_bsn_global_vrf_allowed' : { 'value' : boolean_value },
+        'of_oxm_bsn_global_vrf_allowed_masked' : { 'value' : boolean_value, 'value_mask' : boolean_value },
+
+        'of_oxm_bsn_l3_interface_class_id' : { 'value' : class_id },
+        'of_oxm_bsn_l3_interface_class_id_masked' : { 'value' : class_id, 'value_mask' : class_id },
+
+        'of_oxm_bsn_l3_src_class_id' : { 'value' : class_id },
+        'of_oxm_bsn_l3_src_class_id_masked' : { 'value' : class_id, 'value_mask' : class_id },
+
+        'of_oxm_bsn_l3_dst_class_id' : { 'value' : class_id },
+        'of_oxm_bsn_l3_dst_class_id_masked' : { 'value' : class_id, 'value_mask' : class_id },
+
+        'of_table_stats_entry': { 'wildcards': table_stats_wildcards },
+        'of_match_v1': { 'vlan_vid' : vlan_vid_match, 'vlan_pcp': vlan_pcp,
+                'eth_type': eth_type, 'ip_dscp': ip_dscp, 'ip_proto': ip_proto,
+                'tcp_src': transport_port, 'tcp_dst': transport_port,
+                'in_port': of_port_match_v1
+                },
+        'of_bsn_set_l2_table_request': { 'l2_table_enable': boolean },
+        'of_bsn_set_l2_table_reply': { 'l2_table_enable': boolean },
+        'of_bsn_set_pktin_suppression_request': { 'enabled': boolean },
+        'of_flow_stats_request': { 'out_group': of_group_default_any },
+
+        'of_action_bsn_mirror': { 'dest_port': of_port },
+        'of_action_push_mpls': { 'ethertype': eth_type },
+        'of_action_push_pbb': { 'ethertype': eth_type },
+        'of_action_push_vlan': { 'ethertype': eth_type },
+        'of_action_pop_mpls': { 'ethertype': eth_type },
+        'of_action_set_nw_dst': { 'nw_addr': ipv4 },
+        'of_action_set_nw_ecn': { 'nw_ecn': ip_ecn },
+        'of_action_set_nw_src': { 'nw_addr': ipv4 },
+        'of_action_set_tp_dst': { 'tp_port': transport_port },
+        'of_action_set_tp_src': { 'tp_port': transport_port },
+        'of_action_set_vlan_pcp': { 'vlan_pcp': vlan_pcp },
+        'of_action_set_vlan_vid': { 'vlan_vid': vlan_vid },
+
+        'of_group_mod' : { 'command' : group_mod_cmd },
+        'of_group_add' : { 'command' : group_mod_cmd },
+        'of_group_modify' : { 'command' : group_mod_cmd },
+        'of_group_delete' : { 'command' : group_mod_cmd },
+
+        'of_bucket' : { 'watch_group': of_group },
+
+        'of_bsn_tlv_vlan_vid' : { 'value' : vlan_vid },
+        'of_bsn_gentable_entry_add' : { 'table_id' : gen_table_id },
+
+        'of_features_reply' : { 'auxiliary_id' : of_aux_id},
+}
+
+
+@memoize
+def enum_java_types():
+    enum_types = {}
+    for enum in loxi_globals.unified.enums:
+        java_name = name_c_to_caps_camel(re.sub(r'_t$', "", enum.name))
+        enum_types[enum.name] = gen_enum_jtype(java_name, enum.is_bitmask)
+    return enum_types
+
+def make_match_field_jtype(sub_type_name="?"):
+    return JType("MatchField<{}>".format(sub_type_name))
+
+def make_oxm_jtype(sub_type_name="?"):
+    return JType("OFOxm<{}>".format(sub_type_name))
+
+def list_cname_to_java_name(c_type):
+    m = re.match(r'list\(of_([a-zA-Z_]+)_t\)', c_type)
+    if not m:
+        raise Exception("Not a recgonized standard list type declaration: %s" % c_type)
+    base_name = m.group(1)
+    return "OF" + name_c_to_caps_camel(base_name)
+
+#### main entry point for conversion of LOXI types (c_types) to Java types.
+# FIXME: This badly needs a refactoring
+
+def convert_to_jtype(obj_name, field_name, c_type):
+    """ Convert from a C type ("uint_32") to a java type ("U32")
+    and return a JType object with the size, internal type, and marshalling functions"""
+    if obj_name in exceptions and field_name in exceptions[obj_name]:
+        return exceptions[obj_name][field_name]
+    elif ( obj_name == "of_header" or loxi_utils.class_is_message(obj_name)) and field_name == "type" and c_type == "uint8_t":
+        return of_type
+    elif field_name == "type" and re.match(r'of_action.*', obj_name):
+        return action_type
+    elif field_name == "err_type":
+        return JType("OFErrorType", 'short') \
+            .op(read='bb.readShort()', write='bb.writeShort($name)')
+    elif field_name == "stats_type":
+        return JType("OFStatsType", 'short') \
+            .op(read='bb.readShort()', write='bb.writeShort($name)')
+    elif field_name == "type" and re.match(r'of_instruction.*', obj_name):
+        return instruction_type
+    elif loxi_utils.class_is(obj_name, "of_flow_mod") and field_name == "table_id" and c_type == "uint8_t":
+        return table_id_default_zero
+    elif loxi_utils.class_is(obj_name, "of_flow_mod") and field_name == "out_group" and c_type == "uint32_t":
+        return of_group_default_any
+    elif field_name == "table_id" and c_type == "uint8_t":
+        return table_id
+    elif field_name == "version" and c_type == "uint8_t":
+        return of_version
+    elif field_name == "buffer_id" and c_type == "uint32_t":
+        return buffer_id
+    elif field_name == "group_id" and c_type == "uint32_t":
+        return of_group
+    elif field_name == 'datapath_id':
+        return datapath_id
+    elif field_name == 'actions' and obj_name == 'of_features_reply':
+        return action_type_set
+    elif field_name == "table_id" and re.match(r'of_bsn_gentable.*', obj_name):
+        return gen_table_id
+    elif c_type in default_mtype_to_jtype_convert_map:
+        return default_mtype_to_jtype_convert_map[c_type]
+    elif re.match(r'list\(of_([a-zA-Z_]+)_t\)', c_type):
+        return gen_list_jtype(list_cname_to_java_name(c_type))
+    elif c_type in enum_java_types():
+        return enum_java_types()[c_type]
+    else:
+        print "WARN: Couldn't find java type conversion for '%s' in %s:%s" % (c_type, obj_name, field_name)
+        jtype = name_c_to_caps_camel(re.sub(r'_t$', "", c_type))
+        return JType(jtype)
+
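+# Dispatch examples (illustrative):
+#     convert_to_jtype('of_packet_in', 'data', 'of_octets_t')   -> octets  (per-class exception)
+#     convert_to_jtype('of_echo_request', 'xid', 'uint32_t')    -> u32     (default c-type map)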
+
+#### Enum specific wiretype definitions
+enum_wire_types = {
+        "uint8_t": JType("byte").op(read="bb.readByte()", write="bb.writeByte($name)"),
+        "uint16_t": JType("short").op(read="bb.readShort()", write="bb.writeShort($name)"),
+        "uint32_t": JType("int").op(read="bb.readInt()", write="bb.writeInt($name)"),
+        "uint64_t": JType("long").op(read="bb.readLong()", write="bb.writeLong($name)"),
+}
+
+def convert_enum_wire_type_to_jtype(wire_type):
+    return enum_wire_types[wire_type]
diff --git a/java_gen/pre-written/LICENSE.txt b/java_gen/pre-written/LICENSE.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/java_gen/pre-written/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/java_gen/pre-written/pom.xml b/java_gen/pre-written/pom.xml
new file mode 100644
index 0000000..4f0dc74
--- /dev/null
+++ b/java_gen/pre-written/pom.xml
@@ -0,0 +1,252 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.sonatype.oss</groupId>
+        <artifactId>oss-parent</artifactId>
+        <version>7</version>
+    </parent>
+
+    <groupId>org.projectfloodlight</groupId>
+    <artifactId>openflowj</artifactId>
+    <version>0.3.4-SNAPSHOT</version>
+    <packaging>jar</packaging>
+
+    <name>OpenFlowJ-Loxi</name>
+    <description>OpenFlowJ API supporting OpenFlow versions 1.0 through 1.3.1, generated by LoxiGen</description>
+    <url>http://www.projectfloodlight.org/projects/</url>
+    <licenses>
+        <license>
+            <name>The Apache Software License, Version 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+    <scm>
+        <connection>scm:git:git@github.com:floodlight/loxigen.git</connection>
+        <developerConnection>scm:git:git@github.com:floodlight/loxigen.git</developerConnection>
+        <url>git@github.com:floodlight/loxigen.git</url>
+    </scm>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.code.findbugs</groupId>
+            <artifactId>annotations</artifactId>
+            <version>2.0.2</version>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.11</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.hamcrest</groupId>
+            <artifactId>hamcrest-integration</artifactId>
+            <version>1.3</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty</artifactId>
+            <version>3.9.0.Final</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>15.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>1.7.5</version>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-core</artifactId>
+            <version>1.0.13</version>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+            <version>1.0.13</version>
+        </dependency>
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.1</version>
+                <configuration>
+                    <source>1.7</source>
+                    <target>1.7</target>
+                </configuration>
+            </plugin>
+            <plugin>
+                <!-- pick up sources from gen-src -->
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+                <version>1.8</version>
+                <executions>
+                    <execution>
+                        <id>gen-src-add-source</id>
+                        <phase>generate-sources</phase>
+                        <goals><goal>add-source</goal></goals>
+                        <configuration>
+                            <sources>
+                                <source>gen-src/main/java</source>
+                            </sources>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>add-gen-src-test-source</id>
+                        <!-- note: purposefully not using phase generate-test-sources, because that is not picked up by eclipse:eclipse -->
+                        <phase>validate</phase>
+                        <goals><goal>add-test-source</goal></goals>
+                        <configuration>
+                            <sources>
+                                <source>gen-src/test/java</source>
+                            </sources>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <!-- attach sources -->
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-source-plugin</artifactId>
+                <version>2.2.1</version>
+                <executions>
+                    <execution>
+                        <id>attach-sources</id>
+                        <goals>
+                            <goal>jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <!-- attach javadoc -->
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+                <version>2.9.1</version>
+                <executions>
+                    <execution>
+                        <id>attach-javadocs</id>
+                        <goals>
+                            <goal>jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-eclipse-plugin</artifactId>
+                <version>2.9</version>
+                <configuration>
+                    <downloadSources>true</downloadSources>
+                    <downloadJavadocs>true</downloadJavadocs>
+                </configuration>
+            </plugin>
+            <!-- use maven git-commit-id plugin to provide vcs metadata -->
+            <plugin>
+                <groupId>pl.project13.maven</groupId>
+                <artifactId>git-commit-id-plugin</artifactId>
+                <version>2.1.5</version>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>revision</goal>
+                        </goals>
+                    </execution>
+                </executions>
+
+                <configuration>
+                    <!-- our BuildInfoManager expects dates to be in ISO-8601 format -->
+                    <dateFormat>yyyy-MM-dd'T'HH:mm:ssZ</dateFormat>
+
+                    <verbose>true</verbose>
+
+                    <skipPoms>true</skipPoms>
+                    <generateGitPropertiesFile>false</generateGitPropertiesFile>
+                    <dotGitDirectory>${project.basedir}/../../.git</dotGitDirectory>
+                    <failOnNoGitDirectory>false</failOnNoGitDirectory>
+
+                    <gitDescribe>
+                        <skip>false</skip>
+                        <always>true</always>
+                        <abbrev>7</abbrev>
+                        <dirty>-dirty</dirty>
+                        <forceLongFormat>false</forceLongFormat>
+                    </gitDescribe>
+                </configuration>
+            </plugin>
+            <!-- include git info in generated jars -->
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <version>2.4</version>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+                <configuration>
+                    <archive>
+                        <manifest>
+                            <mainClass>org.projectfloodlight.core.Main</mainClass>
+                        </manifest>
+                        <manifestSections>
+                            <manifestSection>
+                                <name>Floodlight-buildinfo</name>
+                                <manifestEntries>
+                                    <projectName>${project.name}</projectName>
+                                    <version>${project.version}</version>
+                                    <vcsRevision>${git.commit.id.abbrev}</vcsRevision>
+                                    <vcsBranch>${git.branch}</vcsBranch>
+                                    <vcsDirty>${git.commit.id.describe}</vcsDirty>
+                                    <buildUser>${user.name}</buildUser>
+                                    <buildDate>${git.build.time}</buildDate>
+                                </manifestEntries>
+                            </manifestSection>
+                        </manifestSections>
+                    </archive>
+                </configuration>
+            </plugin>
+
+            <!--
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-gpg-plugin</artifactId>
+                <version>1.4</version>
+                <executions>
+                    <execution>
+                        <id>sign-artifacts</id>
+                        <phase>verify</phase>
+                        <goals>
+                            <goal>sign</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            -->
+        </plugins>
+        <resources>
+            <resource>
+                <directory>${basedir}</directory>
+                <filtering>false</filtering>
+                <includes>
+                    <include>LICENSE.txt</include>
+                </includes>
+            </resource>
+        </resources>
+    </build>
+</project>
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/annotations/Immutable.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/annotations/Immutable.java
new file mode 100644
index 0000000..5de2171
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/annotations/Immutable.java
@@ -0,0 +1,13 @@
+package org.projectfloodlight.openflow.annotations;
+
+/**
+ * This annotation marks a class that is considered externally immutable. I.e.,
+ * the externally visible state of the class will not change after its
+ * construction. Such a class can be freely shared between threads and does not
+ * require defensive copying (there is no need to call clone()).
+ *
+ * @author Andreas Wundsam <andreas.wundsam@bigswitch.com>
+ */
+public @interface Immutable {
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/NonExistantMessage.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/NonExistantMessage.java
new file mode 100644
index 0000000..e5192fd
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/NonExistantMessage.java
@@ -0,0 +1,30 @@
+package org.projectfloodlight.openflow.exceptions;
+
+/**
+ * Error: someone asked to create an OFMessage with a wire-format type and version
+ * that does not exist
+ *
+ * @author capveg
+ */
+public class NonExistantMessage extends Exception {
+
+    private static final long serialVersionUID = 1L;
+    byte type;
+    byte version;
+
+    /**
+     * Error: someone asked to create an OFMessage with a wire-format type and
+     * version that does not exist
+     *
+     * @param type
+     *            the wire-format message type
+     * @param version
+     *            the OpenFlow wire-format version number, e.g. 1 == v1.0, 2 ==
+     *            v1.1, etc.
+     */
+    public NonExistantMessage(final byte type, final byte version) {
+        this.type = type;
+        this.version = version;
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFParseError.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFParseError.java
new file mode 100644
index 0000000..658dce7
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFParseError.java
@@ -0,0 +1,22 @@
+package org.projectfloodlight.openflow.exceptions;
+
+public class OFParseError extends Exception {
+    private static final long serialVersionUID = 1L;
+
+    public OFParseError() {
+        super();
+    }
+
+    public OFParseError(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+
+    public OFParseError(final String message) {
+        super(message);
+    }
+
+    public OFParseError(final Throwable cause) {
+        super(cause);
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFShortRead.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFShortRead.java
new file mode 100644
index 0000000..c68a678
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFShortRead.java
@@ -0,0 +1,6 @@
+package org.projectfloodlight.openflow.exceptions;
+
+public class OFShortRead extends Exception {
+    private static final long serialVersionUID = 1L;
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFShortWrite.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFShortWrite.java
new file mode 100644
index 0000000..4f99118
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFShortWrite.java
@@ -0,0 +1,7 @@
+package org.projectfloodlight.openflow.exceptions;
+
+public class OFShortWrite extends Exception {
+
+    private static final long serialVersionUID = 1L;
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFUnsupported.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFUnsupported.java
new file mode 100644
index 0000000..fa52c08
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/exceptions/OFUnsupported.java
@@ -0,0 +1,7 @@
+package org.projectfloodlight.openflow.exceptions;
+
+public class OFUnsupported extends Exception {
+
+    private static final long serialVersionUID = 1L;
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFBsnVportQInQT.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFBsnVportQInQT.java
new file mode 100644
index 0000000..ff077ce
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFBsnVportQInQT.java
@@ -0,0 +1,5 @@
+package org.projectfloodlight.openflow.protocol;
+
+public class OFBsnVportQInQT {
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFMatchBmap.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFMatchBmap.java
new file mode 100644
index 0000000..68ca86d
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFMatchBmap.java
@@ -0,0 +1,13 @@
+package org.projectfloodlight.openflow.protocol;
+
+import org.projectfloodlight.openflow.types.PrimitiveSinkable;
+
+import com.google.common.hash.PrimitiveSink;
+
+public class OFMatchBmap implements PrimitiveSinkable {
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFMessageReader.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFMessageReader.java
new file mode 100644
index 0000000..8837867
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFMessageReader.java
@@ -0,0 +1,8 @@
+package org.projectfloodlight.openflow.protocol;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+public interface OFMessageReader<T> {
+    T readFrom(ChannelBuffer bb) throws OFParseError;
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFMessageWriter.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFMessageWriter.java
new file mode 100644
index 0000000..bec5634
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFMessageWriter.java
@@ -0,0 +1,8 @@
+package org.projectfloodlight.openflow.protocol;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+public interface OFMessageWriter<T> {
+    public void write(ChannelBuffer bb, T message) throws OFParseError;
+}
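
As a rough sketch of how these two interfaces are meant to pair up, a codec for a single fixed-size value can implement both against the same Netty ChannelBuffer. The U32Codec type below is hypothetical and only illustrates the contracts.

    import org.jboss.netty.buffer.ChannelBuffer;
    import org.projectfloodlight.openflow.exceptions.OFParseError;
    import org.projectfloodlight.openflow.protocol.OFMessageReader;
    import org.projectfloodlight.openflow.protocol.OFMessageWriter;

    // Hypothetical codec for an unsigned 32-bit value.
    public class U32Codec implements OFMessageReader<Long>, OFMessageWriter<Long> {
        @Override
        public Long readFrom(ChannelBuffer bb) throws OFParseError {
            if (bb.readableBytes() < 4)
                throw new OFParseError("buffer too short for u32");
            return bb.readUnsignedInt();      // reads 4 bytes as an unsigned value
        }

        @Override
        public void write(ChannelBuffer bb, Long value) throws OFParseError {
            bb.writeInt(value.intValue());    // writes the low 32 bits back out
        }
    }
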
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFObject.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFObject.java
new file mode 100644
index 0000000..5d37987
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFObject.java
@@ -0,0 +1,11 @@
+package org.projectfloodlight.openflow.protocol;
+
+import org.projectfloodlight.openflow.types.PrimitiveSinkable;
+
+
+/**
+ * Base interface of all OpenFlow objects (e.g., messages, actions, stats, etc.)
+ */
+public interface OFObject extends Writeable, PrimitiveSinkable {
+    OFVersion getVersion();
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFObjectFactory.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFObjectFactory.java
new file mode 100644
index 0000000..c5869ef
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFObjectFactory.java
@@ -0,0 +1,7 @@
+package org.projectfloodlight.openflow.protocol;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+public interface OFObjectFactory<T extends OFObject> {
+    T read(ChannelBuffer buffer);
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFOxmList.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFOxmList.java
new file mode 100644
index 0000000..7f66110
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFOxmList.java
@@ -0,0 +1,154 @@
+package org.projectfloodlight.openflow.protocol;
+
+import java.util.EnumMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.match.MatchField;
+import org.projectfloodlight.openflow.protocol.match.MatchFields;
+import org.projectfloodlight.openflow.protocol.oxm.OFOxm;
+import org.projectfloodlight.openflow.types.OFValueType;
+import org.projectfloodlight.openflow.types.PrimitiveSinkable;
+import org.projectfloodlight.openflow.util.ChannelUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.hash.PrimitiveSink;
+
+public class OFOxmList implements Iterable<OFOxm<?>>, Writeable, PrimitiveSinkable {
+    private static final Logger logger = LoggerFactory.getLogger(OFOxmList.class);
+
+    private final Map<MatchFields, OFOxm<?>> oxmMap;
+
+    public final static OFOxmList EMPTY = new OFOxmList(ImmutableMap.<MatchFields, OFOxm<?>>of());
+
+    private OFOxmList(Map<MatchFields, OFOxm<?>> oxmMap) {
+        this.oxmMap = oxmMap;
+    }
+
+    @SuppressWarnings("unchecked")
+    public <T extends OFValueType<T>> OFOxm<T> get(MatchField<T> matchField) {
+        return (OFOxm<T>) oxmMap.get(matchField.id);
+    }
+
+    public static class Builder {
+        private final Map<MatchFields, OFOxm<?>> oxmMap;
+
+        public Builder() {
+            oxmMap = new EnumMap<MatchFields, OFOxm<?>>(MatchFields.class);
+        }
+
+        public Builder(EnumMap<MatchFields, OFOxm<?>> oxmMap) {
+            this.oxmMap = oxmMap;
+        }
+
+        public <T extends OFValueType<T>> void set(OFOxm<T> oxm) {
+            oxmMap.put(oxm.getMatchField().id, oxm);
+        }
+
+        public <T extends OFValueType<T>> void unset(MatchField<T> matchField) {
+            oxmMap.remove(matchField.id);
+        }
+
+        public OFOxmList build() {
+            return OFOxmList.ofList(oxmMap.values());
+        }
+    }
+
+    @Override
+    public Iterator<OFOxm<?>> iterator() {
+        return oxmMap.values().iterator();
+    }
+
+    public static OFOxmList ofList(Iterable<OFOxm<?>> oxmList) {
+        Map<MatchFields, OFOxm<?>> map = new EnumMap<MatchFields, OFOxm<?>>(
+                MatchFields.class);
+        for (OFOxm<?> o : oxmList) {
+            OFOxm<?> canonical = o.getCanonical();
+
+            if(logger.isDebugEnabled() && !Objects.equal(o, canonical)) {
+                logger.debug("OFOxmList: normalized non-canonical OXM {} to {}", o, canonical);
+            }
+
+            if(canonical != null)
+                map.put(canonical.getMatchField().id, canonical);
+
+        }
+        return new OFOxmList(map);
+    }
+
+    public static OFOxmList of(OFOxm<?>... oxms) {
+        Map<MatchFields, OFOxm<?>> map = new EnumMap<MatchFields, OFOxm<?>>(
+                MatchFields.class);
+        for (OFOxm<?> o : oxms) {
+            OFOxm<?> canonical = o.getCanonical();
+
+            if(logger.isDebugEnabled() && !Objects.equal(o, canonical)) {
+                logger.debug("OFOxmList: normalized non-canonical OXM {} to {}", o, canonical);
+            }
+
+            if(canonical != null)
+                map.put(canonical.getMatchField().id, canonical);
+        }
+        return new OFOxmList(map);
+    }
+
+    public static OFOxmList readFrom(ChannelBuffer bb, int length,
+            OFMessageReader<OFOxm<?>> reader) throws OFParseError {
+        return ofList(ChannelUtils.readList(bb, length, reader));
+    }
+
+    @Override
+    public void writeTo(ChannelBuffer bb) {
+        for (OFOxm<?> o : this) {
+            o.writeTo(bb);
+        }
+    }
+
+    public OFOxmList.Builder createBuilder() {
+        return new OFOxmList.Builder(new EnumMap<MatchFields, OFOxm<?>>(oxmMap));
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((oxmMap == null) ? 0 : oxmMap.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        OFOxmList other = (OFOxmList) obj;
+        if (oxmMap == null) {
+            if (other.oxmMap != null)
+                return false;
+        } else if (!oxmMap.equals(other.oxmMap))
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "OFOxmList" + oxmMap;
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        for (OFOxm<?> o : this) {
+            o.putTo(sink);
+        }
+    }
+
+
+}
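
A small usage sketch relying only on the accessors above and on OFOxm from the generated code: because entries are keyed by MatchFields, at most one (canonical) OXM per field survives, and iteration walks them in spec order. The summarize helper is hypothetical.

    import java.util.ArrayList;
    import java.util.List;

    import org.projectfloodlight.openflow.protocol.OFOxmList;
    import org.projectfloodlight.openflow.protocol.oxm.OFOxm;

    public class OxmListExample {
        // Hypothetical helper: list the names of the fields present in an OXM list.
        public static List<String> summarize(OFOxmList oxms) {
            List<String> names = new ArrayList<String>();
            for (OFOxm<?> oxm : oxms) {
                // each entry is the canonical form of the OXM, keyed by its MatchFields id
                names.add(oxm.getMatchField().getName());
            }
            return names;
        }
    }
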
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFRequest.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFRequest.java
new file mode 100644
index 0000000..6666943
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFRequest.java
@@ -0,0 +1,5 @@
+package org.projectfloodlight.openflow.protocol;
+
+/** Type-safety interface. Enables type-safe combinations of requests and replies. */
+public interface OFRequest<REPLY extends OFMessage> extends OFMessage {
+}
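
The REPLY type parameter lets transaction helpers recover the reply type from the request type at compile time. A rough sketch with hypothetical message interfaces (the real request/reply interfaces, and OFMessage itself, come from the generated code):

    import org.projectfloodlight.openflow.protocol.OFMessage;
    import org.projectfloodlight.openflow.protocol.OFRequest;

    // Hypothetical generated-style interfaces, shown only to illustrate the pattern.
    interface MyEchoReply extends OFMessage {
    }

    interface MyEchoRequest extends OFRequest<MyEchoReply> {
    }

    interface TransactionService {
        // The reply type is inferred from the request:
        // transact(someEchoRequest) is typed as returning MyEchoReply.
        <R extends OFMessage> R transact(OFRequest<R> request);
    }
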
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFTableFeature.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFTableFeature.java
new file mode 100644
index 0000000..b722228
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFTableFeature.java
@@ -0,0 +1,5 @@
+package org.projectfloodlight.openflow.protocol;
+
+public class OFTableFeature {
+    // FIXME implement
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFVersion.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFVersion.java
new file mode 100644
index 0000000..6f54e5f
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/OFVersion.java
@@ -0,0 +1,16 @@
+package org.projectfloodlight.openflow.protocol;
+
+public enum OFVersion {
+    OF_10(1), OF_11(2), OF_12(3), OF_13(4);
+
+    public final int wireVersion;
+
+    OFVersion(final int wireVersion) {
+        this.wireVersion = wireVersion;
+    }
+
+    public int getWireVersion() {
+        return wireVersion;
+    }
+
+}
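
A small sketch of mapping a wire version number back to the enum constant, which is a typical step once the version byte of an incoming message has been read. The lookup helper itself is hypothetical, not part of this change.

    import org.projectfloodlight.openflow.protocol.OFVersion;

    public class OFVersions {
        // Hypothetical helper: map a wire version (1..4) to its OFVersion, or null if unknown.
        public static OFVersion ofWireVersion(int wireVersion) {
            for (OFVersion v : OFVersion.values()) {
                if (v.getWireVersion() == wireVersion)
                    return v;
            }
            return null; // unknown or unsupported wire version
        }
    }
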
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/Writeable.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/Writeable.java
new file mode 100644
index 0000000..31ae9ab
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/Writeable.java
@@ -0,0 +1,7 @@
+package org.projectfloodlight.openflow.protocol;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+public interface Writeable {
+    void writeTo(ChannelBuffer bb);
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/XidGenerator.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/XidGenerator.java
new file mode 100644
index 0000000..2ee2764
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/XidGenerator.java
@@ -0,0 +1,5 @@
+package org.projectfloodlight.openflow.protocol;
+
+public interface XidGenerator {
+    long nextXid();
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/XidGenerators.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/XidGenerators.java
new file mode 100644
index 0000000..4609afa
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/XidGenerators.java
@@ -0,0 +1,38 @@
+package org.projectfloodlight.openflow.protocol;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+public class XidGenerators {
+    private static final XidGenerator GLOBAL_XID_GENERATOR = new StandardXidGenerator();
+
+    public static XidGenerator create() {
+        return new StandardXidGenerator();
+    }
+
+    public static XidGenerator global() {
+        return GLOBAL_XID_GENERATOR;
+    }
+}
+
+class StandardXidGenerator implements XidGenerator {
+
+    private final AtomicLong xidGen = new AtomicLong();
+    private static final long MAX_XID = 0xFFFFFFFFL;
+
+    @Override
+    public long nextXid() {
+        long xid;
+        do {
+            xid = xidGen.incrementAndGet();
+            if(xid > MAX_XID) {
+                synchronized(this) {
+                    if(xidGen.get() > MAX_XID) {
+                        xidGen.set(0);
+                    }
+                }
+            }
+        } while(xid > MAX_XID);
+        return xid;
+    }
+
+}
\ No newline at end of file
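
Usage is simply to grab a generator and ask for the next transaction id: the global generator is shared process-wide, while create() hands out an independent sequence. The snippet below is only a sketch.

    import org.projectfloodlight.openflow.protocol.XidGenerator;
    import org.projectfloodlight.openflow.protocol.XidGenerators;

    public class XidExample {
        public static void main(String[] args) {
            // per-connection sequence; starts at 1 and wraps after 0xFFFFFFFF
            XidGenerator perConnection = XidGenerators.create();
            long xid = perConnection.nextXid();

            // process-wide shared sequence
            long globalXid = XidGenerators.global().nextXid();

            System.out.println("xid=" + xid + " globalXid=" + globalXid);
        }
    }
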
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/Match.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/Match.java
new file mode 100644
index 0000000..0efdcbb
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/Match.java
@@ -0,0 +1,216 @@
+package org.projectfloodlight.openflow.protocol.match;
+
+import org.projectfloodlight.openflow.protocol.OFObject;
+import org.projectfloodlight.openflow.types.Masked;
+import org.projectfloodlight.openflow.types.OFValueType;
+
+/**
+ * Generic interface for version-agnostic immutable Match structure.
+ * The Match structure is defined in the OpenFlow protocol, and it contains information on
+ * the fields to be matched in a specific flow record.
+ * This interface does not assume anything about the fields in the Match structure. If, in
+ * some version, the match structure cannot handle a certain field, it may return <code>false</code>
+ * from <code>supports(...)</code> calls and throw <code>UnsupportedOperationException</code> from all
+ * other methods in such cases.
+ * <br><br>
+ * On wildcards and masks:<br>
+ * This interface defines the following masking notations for fields:
+ * <ul>
+ * <li><b>Exact</b>: field is matched exactly against a single, fixed value (no mask, or mask is all ones).
+ * <li><b>Wildcarded</b>: field is not being matched. It is fully masked (mask=0) and any value of it
+ * will match the flow record having this match.
+ * <li><b>Partially masked</b>: field is matched using a specified mask which is neither 0 nor all ones. Mask can
+ * be either arbitrary or require some specific structure.
+ * </ul>
+ * Implementing classes may support all, some, or none of these masking types, and may support them
+ * only in part. For example, OF1.0 supports exact match and (full) wildcarding for all fields, but it
+ * only supports partial masking for the IP source/destination fields, and this partial masking must be
+ * in the CIDR prefix format. Thus, an OF1.0 implementation may throw <code>UnsupportedOperationException</code> if given
+ * in <code>setMasked</code> an IP mask of, for example, 255.0.255.0, or if <code>setMasked</code> is called for any field
+ * other than the IP source/destination address.
+ * <br><br>
+ * On prerequisites:<br>
+ * From the OF1.1 spec, page 28 (the OF1.0 spec failed to specify this explicitly, but it
+ * is the behavior of OF1.0 switches):
+ * "Protocol-specific fields within ofp_match will be ignored within a single table when
+ * the corresponding protocol is not specified in the match. The MPLS match fields will
+ * be ignored unless the Ethertype is specified as MPLS. Likewise, the IP header and
+ * transport header fields will be ignored unless the Ethertype is specified as either
+ * IPv4 or ARP. The tp_src and tp_dst fields will be ignored unless the network protocol
+ * specified is as TCP, UDP or SCTP. Fields that are ignored don't need to be wildcarded
+ * and should be set to 0."
+ * <br><br>
+ * This interface uses generics to ensure type safety in user code. However, implementing classes may have to suppress
+ * 'unchecked cast' warnings while making sure they cast correctly based on their implementation details.
+ *
+ * @author Yotam Harchol (yotam.harchol@bigswitch.com)
+ */
+public interface Match extends OFObject {
+
+    /**
+     * Returns a value for the given field if:
+     * <ul>
+     * <li>Field is supported
+     * <li>Field is not fully wildcarded
+     * <li>Prerequisites are ok
+     * </ul>
+     * If one of the above conditions does not hold, returns null. Value is returned masked if partially wildcarded.
+     *
+     * @param field Match field to retrieve
+     * @return Value of the match field (may be masked), or <code>null</code> if one of the conditions above does not hold.
+     * @throws UnsupportedOperationException If field is not supported.
+     */
+    public <F extends OFValueType<F>> F get(MatchField<F> field) throws UnsupportedOperationException;
+
+    /**
+     * Returns the masked value for the given field from this match, along with the mask itself.
+     * Prerequisite: field is partially masked.
+     * If the prerequisite is not met, <code>null</code> is returned.
+     *
+     * @param field Match field to retrieve.
+     * @return Masked value of match field or null if no mask is set.
+     * @throws UnsupportedOperationException If field is not supported.
+     */
+    public <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field) throws UnsupportedOperationException;
+
+    /**
+     * Returns true if and only if this match object supports the given match field.
+     *
+     * @param field Match field
+     * @return true if field is supported, false otherwise.
+     */
+    public boolean supports(MatchField<?> field);
+
+    /**
+     * Returns true if and only if this match object supports partial bitmasking of the given field.
+     * (note: not all possible values of this bitmask have to be acceptable)
+     *
+     * @param field Match field.
+     * @return true if field can be partially masked, false otherwise.
+     * @throws UnsupportedOperationException If field is not supported.
+     */
+    public boolean supportsMasked(MatchField<?> field) throws UnsupportedOperationException;
+
+    /**
+     * Returns true if and only if this field is currently specified in the match with an exact value and
+     * no mask. I.e., the specified match will only select packets that match the exact value of get(field).
+     *
+     * @param field Match field.
+     * @return true if field has a specific exact value, false if not.
+     * @throws UnsupportedOperationException If field is not supported.
+     */
+    public boolean isExact(MatchField<?> field) throws UnsupportedOperationException;
+
+    /**
+     * True if and only if this field is currently logically unspecified in the match, i.e., the
+     * value returned by get(field) has no impact on whether a packet will be selected
+     * by the match or not.
+     *
+     * @param field Match field.
+     * @return true if field is fully wildcarded, false if not.
+     * @throws UnsupportedOperationException If field is not supported.
+     */
+    public boolean isFullyWildcarded(MatchField<?> field) throws UnsupportedOperationException;
+
+    /**
+     * True if and only if this field is currently partially specified in the match, i.e., the
+     * match will only select packets for which (p.value & mask) == value, where value and mask
+     * are taken from getMasked(field), and the mask is not 0.
+     *
+     * @param field Match field.
+     * @return true if field is partially masked, false if not.
+     * @throws UnsupportedOperationException If field is not supported.
+     */
+    public boolean isPartiallyMasked(MatchField<?> field) throws UnsupportedOperationException;
+
+    /**
+     * Get an Iterable over the match fields that have been specified for the
+     * match, i.e., the fields that are matched exactly or with a mask
+     * (but not the fully wildcarded ones).
+     *
+     * @return an Iterable over the specified match fields.
+     */
+    public Iterable<MatchField<?>> getMatchFields();
+
+    /**
+     * Returns a builder to build new instances of this type of match object.
+     * @return Match builder
+     */
+    public Builder createBuilder();
+
+    /**
+     * Builder interface for Match objects.
+     * A Builder is used to create new Match objects, and it creates the match according to the OpenFlow
+     * version it corresponds to. The builder uses the same notation of wildcards and masks, and may also throw
+     * <code>UnsupportedOperationException</code> if it is asked to create a match that is not supported in
+     * the version it represents.
+     *
+     * While being used, a MatchBuilder may not be consistent in terms of field prerequisites. However, the user
+     * must resolve these before using the generated Match object, as these prerequisites should be enforced in
+     * the getters.
+     *
+     * @author Yotam Harchol (yotam.harchol@bigswitch.com)
+     */
+    interface Builder {
+        public <F extends OFValueType<F>> F get(MatchField<F> field) throws UnsupportedOperationException;
+
+        public <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field) throws UnsupportedOperationException;
+
+        public boolean supports(MatchField<?> field);
+
+        public boolean supportsMasked(MatchField<?> field) throws UnsupportedOperationException;
+
+        public boolean isExact(MatchField<?> field) throws UnsupportedOperationException;
+
+        public boolean isFullyWildcarded(MatchField<?> field) throws UnsupportedOperationException;
+
+        public boolean isPartiallyMasked(MatchField<?> field) throws UnsupportedOperationException;
+
+        /**
+         * Sets a specific exact value for a field.
+         *
+         * @param field Match field to set.
+         * @param value Value of match field.
+         * @return the Builder instance used.
+         * @throws UnsupportedOperationException If field is not supported.
+         */
+        public <F extends OFValueType<F>> Builder setExact(MatchField<F> field, F value) throws UnsupportedOperationException;
+
+        /**
+         * Sets a masked value for a field.
+         *
+         * @param field Match field to set.
+         * @param value Value of field.
+         * @param mask Mask value.
+         * @return the Builder instance used.
+         * @throws UnsupportedOperationException If field is not supported, if field is supported but does not support masking, or if mask structure is not supported.
+         */
+        public <F extends OFValueType<F>> Builder setMasked(MatchField<F> field, F value, F mask) throws UnsupportedOperationException;
+
+        /**
+         * Sets a masked value for a field.
+         *
+         * @param field Match field to set.
+         * @param valueWithMask Compound Masked object contains the value and the mask.
+         * @return the Builder instance used.
+         * @throws UnsupportedOperationException If field is not supported, if field is supported but does not support masking, or if mask structure is not supported.
+         */
+        public <F extends OFValueType<F>> Builder setMasked(MatchField<F> field, Masked<F> valueWithMask) throws UnsupportedOperationException;
+
+        /**
+         * Unsets any value given for the field and wildcards it so that it matches any value.
+         *
+         * @param field Match field to unset.
+         * @return the Builder instance used.
+         * @throws UnsupportedOperationException If field is not supported.
+         */
+        public <F extends OFValueType<F>> Builder wildcard(MatchField<F> field) throws UnsupportedOperationException;
+
+        /**
+         * Returns the match created by this builder.
+         *
+         * @return a Match object.
+         */
+        public Match build();
+    }
+}
\ No newline at end of file
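
As a sketch of the builder notation described above: given a version-specific Builder (obtained elsewhere, e.g. from a generated factory that is not part of this file), exact values, masks, and wildcards are set field by field. The helper method is hypothetical, and IPv4Address.of(String) is assumed to parse dotted-quad notation as in the types package.

    import org.projectfloodlight.openflow.protocol.match.Match;
    import org.projectfloodlight.openflow.protocol.match.MatchField;
    import org.projectfloodlight.openflow.types.EthType;
    import org.projectfloodlight.openflow.types.IPv4Address;

    public class MatchBuilderExample {
        // Hypothetical helper: match IPv4 traffic from 10.0.0.0/24, any destination.
        public static Match buildExample(Match.Builder builder) {
            return builder
                    .setExact(MatchField.ETH_TYPE, EthType.IPv4)    // prerequisite for the IPv4 fields
                    .setMasked(MatchField.IPV4_SRC,
                               IPv4Address.of("10.0.0.0"),
                               IPv4Address.of("255.255.255.0"))     // partial (CIDR-style) mask
                    .wildcard(MatchField.IPV4_DST)                  // explicitly match any destination
                    .build();
        }
    }
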
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/MatchField.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/MatchField.java
new file mode 100644
index 0000000..2c63048
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/MatchField.java
@@ -0,0 +1,212 @@
+package org.projectfloodlight.openflow.protocol.match;
+
+import org.projectfloodlight.openflow.types.ArpOpcode;
+import org.projectfloodlight.openflow.types.ClassId;
+import org.projectfloodlight.openflow.types.EthType;
+import org.projectfloodlight.openflow.types.ICMPv4Code;
+import org.projectfloodlight.openflow.types.ICMPv4Type;
+import org.projectfloodlight.openflow.types.IPv4Address;
+import org.projectfloodlight.openflow.types.IPv6Address;
+import org.projectfloodlight.openflow.types.IPv6FlowLabel;
+import org.projectfloodlight.openflow.types.IpDscp;
+import org.projectfloodlight.openflow.types.IpEcn;
+import org.projectfloodlight.openflow.types.IpProtocol;
+import org.projectfloodlight.openflow.types.LagId;
+import org.projectfloodlight.openflow.types.MacAddress;
+import org.projectfloodlight.openflow.types.OFBitMask128;
+import org.projectfloodlight.openflow.types.OFBooleanValue;
+import org.projectfloodlight.openflow.types.OFMetadata;
+import org.projectfloodlight.openflow.types.OFPort;
+import org.projectfloodlight.openflow.types.OFValueType;
+import org.projectfloodlight.openflow.types.OFVlanVidMatch;
+import org.projectfloodlight.openflow.types.TransportPort;
+import org.projectfloodlight.openflow.types.U32;
+import org.projectfloodlight.openflow.types.U8;
+import org.projectfloodlight.openflow.types.VRF;
+import org.projectfloodlight.openflow.types.VlanPcp;
+
+@SuppressWarnings("unchecked")
+public class MatchField<F extends OFValueType<F>> {
+    private final String name;
+    public final MatchFields id;
+    private final Prerequisite<?>[] prerequisites;
+
+    private MatchField(final String name, final MatchFields id, Prerequisite<?>... prerequisites) {
+        this.name = name;
+        this.id = id;
+        this.prerequisites = prerequisites;
+    }
+
+    public final static MatchField<OFPort> IN_PORT =
+            new MatchField<OFPort>("in_port", MatchFields.IN_PORT);
+
+    public final static MatchField<OFPort> IN_PHY_PORT =
+            new MatchField<OFPort>("in_phy_port", MatchFields.IN_PHY_PORT,
+                    new Prerequisite<OFPort>(MatchField.IN_PORT));
+
+    public final static MatchField<OFMetadata> METADATA =
+            new MatchField<OFMetadata>("metadata", MatchFields.METADATA);
+
+    public final static MatchField<MacAddress> ETH_DST =
+            new MatchField<MacAddress>("eth_dst", MatchFields.ETH_DST);
+
+    public final static MatchField<MacAddress> ETH_SRC =
+            new MatchField<MacAddress>("eth_src", MatchFields.ETH_SRC);
+
+    public final static MatchField<EthType> ETH_TYPE =
+            new MatchField<EthType>("eth_type", MatchFields.ETH_TYPE);
+
+    public final static MatchField<OFVlanVidMatch> VLAN_VID =
+            new MatchField<OFVlanVidMatch>("vlan_vid", MatchFields.VLAN_VID);
+
+    public final static MatchField<VlanPcp> VLAN_PCP =
+            new MatchField<VlanPcp>("vlan_pcp", MatchFields.VLAN_PCP,
+                    new Prerequisite<OFVlanVidMatch>(MatchField.VLAN_VID));
+
+    public final static MatchField<IpDscp> IP_DSCP =
+            new MatchField<IpDscp>("ip_dscp", MatchFields.IP_DSCP,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.IPv4, EthType.IPv6));
+
+    public final static MatchField<IpEcn> IP_ECN =
+            new MatchField<IpEcn>("ip_ecn", MatchFields.IP_ECN,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.IPv4, EthType.IPv6));
+
+    public final static MatchField<IpProtocol> IP_PROTO =
+            new MatchField<IpProtocol>("ip_proto", MatchFields.IP_PROTO,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.IPv4, EthType.IPv6));
+
+    public final static MatchField<IPv4Address> IPV4_SRC =
+            new MatchField<IPv4Address>("ipv4_src", MatchFields.IPV4_SRC,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.IPv4));
+
+    public final static MatchField<IPv4Address> IPV4_DST =
+            new MatchField<IPv4Address>("ipv4_dst", MatchFields.IPV4_DST,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.IPv4));
+
+    public final static MatchField<TransportPort> TCP_SRC = new MatchField<TransportPort>(
+            "tcp_src", MatchFields.TCP_SRC,
+            new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.TCP));
+
+    public final static MatchField<TransportPort> TCP_DST = new MatchField<TransportPort>(
+            "tcp_dst", MatchFields.TCP_DST,
+            new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.TCP));
+
+    public final static MatchField<TransportPort> UDP_SRC = new MatchField<TransportPort>(
+            "udp_src", MatchFields.UDP_SRC,
+            new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.UDP));
+
+    public final static MatchField<TransportPort> UDP_DST = new MatchField<TransportPort>(
+            "udp_dst", MatchFields.UDP_DST,
+            new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.UDP));
+
+    public final static MatchField<TransportPort> SCTP_SRC = new MatchField<TransportPort>(
+            "sctp_src", MatchFields.SCTP_SRC,
+            new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.SCTP));
+
+    public final static MatchField<TransportPort> SCTP_DST = new MatchField<TransportPort>(
+            "sctp_dst", MatchFields.SCTP_DST,
+            new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.SCTP));
+
+    public final static MatchField<ICMPv4Type> ICMPV4_TYPE = new MatchField<ICMPv4Type>(
+            "icmpv4_type", MatchFields.ICMPV4_TYPE,
+            new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.ICMP));
+
+    public final static MatchField<ICMPv4Code> ICMPV4_CODE = new MatchField<ICMPv4Code>(
+            "icmpv4_code", MatchFields.ICMPV4_CODE,
+            new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.ICMP));
+
+    public final static MatchField<ArpOpcode> ARP_OP = new MatchField<ArpOpcode>(
+            "arp_op", MatchFields.ARP_OP,
+            new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.ARP));
+
+    public final static MatchField<IPv4Address> ARP_SPA =
+            new MatchField<IPv4Address>("arp_spa", MatchFields.ARP_SPA,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.ARP));
+
+    public final static MatchField<IPv4Address> ARP_TPA =
+            new MatchField<IPv4Address>("arp_tpa", MatchFields.ARP_TPA,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.ARP));
+
+    public final static MatchField<MacAddress> ARP_SHA =
+            new MatchField<MacAddress>("arp_sha", MatchFields.ARP_SHA,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.ARP));
+
+    public final static MatchField<MacAddress> ARP_THA =
+            new MatchField<MacAddress>("arp_tha", MatchFields.ARP_THA,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.ARP));
+
+    public final static MatchField<IPv6Address> IPV6_SRC =
+            new MatchField<IPv6Address>("ipv6_src", MatchFields.IPV6_SRC,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.IPv6));
+
+    public final static MatchField<IPv6Address> IPV6_DST =
+            new MatchField<IPv6Address>("ipv6_dst", MatchFields.IPV6_DST,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.IPv6));
+
+    public final static MatchField<IPv6FlowLabel> IPV6_FLABEL =
+            new MatchField<IPv6FlowLabel>("ipv6_flabel", MatchFields.IPV6_FLABEL,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.IPv6));
+
+    public final static MatchField<U8> ICMPV6_TYPE =
+            new MatchField<U8>("icmpv6_type", MatchFields.ICMPV6_TYPE,
+                    new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.IPv6_ICMP));
+
+    public final static MatchField<U8> ICMPV6_CODE =
+            new MatchField<U8>("icmpv6_code", MatchFields.ICMPV6_CODE,
+                    new Prerequisite<IpProtocol>(MatchField.IP_PROTO, IpProtocol.IPv6_ICMP));
+
+    public final static MatchField<IPv6Address> IPV6_ND_TARGET =
+            new MatchField<IPv6Address>("ipv6_nd_target", MatchFields.IPV6_ND_TARGET,
+                    new Prerequisite<U8>(MatchField.ICMPV6_TYPE, U8.of((short)135), U8.of((short)136)));
+
+    public final static MatchField<MacAddress> IPV6_ND_SLL =
+            new MatchField<MacAddress>("ipv6_nd_sll", MatchFields.IPV6_ND_SLL,
+                    new Prerequisite<U8>(MatchField.ICMPV6_TYPE, U8.of((short)135)));
+
+    public final static MatchField<MacAddress> IPV6_ND_TLL =
+            new MatchField<MacAddress>("ipv6_nd_tll", MatchFields.IPV6_ND_TLL,
+                    new Prerequisite<U8>(MatchField.ICMPV6_TYPE, U8.of((short)136)));
+
+    public final static MatchField<U32> MPLS_LABEL =
+            new MatchField<U32>("mpls_label", MatchFields.MPLS_LABEL,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.MPLS_UNICAST, EthType.MPLS_MULTICAST));
+
+    public final static MatchField<U8> MPLS_TC =
+            new MatchField<U8>("mpls_tc", MatchFields.MPLS_TC,
+                    new Prerequisite<EthType>(MatchField.ETH_TYPE, EthType.MPLS_UNICAST, EthType.MPLS_MULTICAST));
+
+    public final static MatchField<OFBitMask128> BSN_IN_PORTS_128 =
+            new MatchField<OFBitMask128>("bsn_in_ports_128", MatchFields.BSN_IN_PORTS_128);
+
+    public final static MatchField<LagId> BSN_LAG_ID =
+            new MatchField<LagId>("bsn_lag_id", MatchFields.BSN_LAG_ID);
+
+    public final static MatchField<VRF> BSN_VRF =
+            new MatchField<VRF>("bsn_vrf", MatchFields.BSN_VRF);
+
+    public final static MatchField<OFBooleanValue> BSN_GLOBAL_VRF_ALLOWED =
+            new MatchField<OFBooleanValue>("bsn_global_vrf_allowed", MatchFields.BSN_GLOBAL_VRF_ALLOWED);
+
+    public final static MatchField<ClassId> BSN_L3_INTERFACE_CLASS_ID =
+            new MatchField<ClassId>("bsn_l3_interface_class_id", MatchFields.BSN_L3_INTERFACE_CLASS_ID);
+
+    public final static MatchField<ClassId> BSN_L3_SRC_CLASS_ID =
+            new MatchField<ClassId>("bsn_l3_src_class_id", MatchFields.BSN_L3_SRC_CLASS_ID);
+
+    public final static MatchField<ClassId> BSN_L3_DST_CLASS_ID =
+            new MatchField<ClassId>("bsn_l3_dst_class_id", MatchFields.BSN_L3_DST_CLASS_ID);
+
+    public String getName() {
+        return name;
+    }
+
+    public boolean arePrerequisitesOK(Match match) {
+        for (Prerequisite<?> p : this.prerequisites) {
+            if (!p.isSatisfied(match)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+}
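
The prerequisites wired into the constants above can be checked against an existing match before a field's value is trusted, as in this sketch (the guard method is hypothetical):

    import org.projectfloodlight.openflow.protocol.match.Match;
    import org.projectfloodlight.openflow.protocol.match.MatchField;
    import org.projectfloodlight.openflow.types.OFValueType;

    public class PrerequisiteGuard {
        // Hypothetical helper: return the value of a field only if the match supports it
        // and all of the field's prerequisites (e.g. ETH_TYPE / IP_PROTO) are satisfied.
        public static <F extends OFValueType<F>> F getIfValid(Match match, MatchField<F> field) {
            if (!match.supports(field))
                return null;
            if (!field.arePrerequisitesOK(match))
                return null; // e.g. TCP_SRC without IP_PROTO == TCP
            return match.get(field);
        }
    }
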
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/MatchFields.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/MatchFields.java
new file mode 100644
index 0000000..6d9b20e
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/MatchFields.java
@@ -0,0 +1,48 @@
+package org.projectfloodlight.openflow.protocol.match;
+
+// MUST BE ORDERED AS IN THE OPENFLOW SPEC!!!
+public enum MatchFields {
+    IN_PORT,
+    IN_PHY_PORT,
+    METADATA,
+    ETH_DST,
+    ETH_SRC,
+    ETH_TYPE,
+    VLAN_VID,
+    VLAN_PCP,
+    IP_DSCP,
+    IP_ECN,
+    IP_PROTO,
+    IPV4_SRC,
+    IPV4_DST,
+    TCP_SRC,
+    TCP_DST,
+    UDP_SRC,
+    UDP_DST,
+    SCTP_SRC,
+    SCTP_DST,
+    ICMPV4_TYPE,
+    ICMPV4_CODE,
+    ARP_OP,
+    ARP_SPA,
+    ARP_TPA,
+    ARP_SHA,
+    ARP_THA,
+    IPV6_SRC,
+    IPV6_DST,
+    IPV6_FLABEL,
+    ICMPV6_TYPE,
+    ICMPV6_CODE,
+    IPV6_ND_TARGET,
+    IPV6_ND_SLL,
+    IPV6_ND_TLL,
+    MPLS_LABEL,
+    MPLS_TC,
+    BSN_IN_PORTS_128,
+    BSN_LAG_ID,
+    BSN_VRF,
+    BSN_GLOBAL_VRF_ALLOWED,
+    BSN_L3_INTERFACE_CLASS_ID,
+    BSN_L3_SRC_CLASS_ID,
+    BSN_L3_DST_CLASS_ID,
+}
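
The ordering matters because OFOxmList stores its entries in an EnumMap keyed by this enum, so OXMs are iterated and serialized in declaration order. A quick sketch of that property (nothing below is part of the library):

    import java.util.EnumMap;

    import org.projectfloodlight.openflow.protocol.match.MatchFields;

    public class MatchFieldsOrderExample {
        public static void main(String[] args) {
            EnumMap<MatchFields, String> map = new EnumMap<MatchFields, String>(MatchFields.class);
            map.put(MatchFields.IPV4_SRC, "src");
            map.put(MatchFields.IN_PORT, "port");
            map.put(MatchFields.ETH_TYPE, "type");

            // EnumMap iterates in enum declaration order, i.e. spec order:
            // IN_PORT, ETH_TYPE, IPV4_SRC
            for (MatchFields f : map.keySet()) {
                System.out.println(f);
            }
        }
    }
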
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/Prerequisite.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/Prerequisite.java
new file mode 100644
index 0000000..03d5e79
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/match/Prerequisite.java
@@ -0,0 +1,45 @@
+package org.projectfloodlight.openflow.protocol.match;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.projectfloodlight.openflow.types.OFValueType;
+
+public class Prerequisite<T extends OFValueType<T>> {
+    private final MatchField<T> field;
+    private final Set<OFValueType<T>> values;
+    private boolean any;
+
+    @SafeVarargs
+    public Prerequisite(MatchField<T> field, OFValueType<T>... values) {
+        this.values = new HashSet<OFValueType<T>>();
+        this.field = field;
+        if (values == null || values.length == 0) {
+            this.any = true;
+        } else {
+            this.any = false;
+            for (OFValueType<T> value : values) {
+                this.values.add(value);
+            }
+        }
+    }
+
+    /**
+     * Returns true if this prerequisite is satisfied by the given match object.
+     *
+     * @param match Match object
+     * @return true iff prerequisite is satisfied.
+     */
+    public boolean isSatisfied(Match match) {
+        OFValueType<T> res = match.get(this.field);
+        if (res == null)
+            return false;
+        if (this.any)
+            return true;
+        if (this.values.contains(res)) {
+            return true;
+        }
+        return false;
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver10/ChannelUtilsVer10.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver10/ChannelUtilsVer10.java
new file mode 100644
index 0000000..b4937ba
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver10/ChannelUtilsVer10.java
@@ -0,0 +1,91 @@
+package org.projectfloodlight.openflow.protocol.ver10;
+
+import java.util.EnumSet;
+import java.util.Set;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFActionType;
+import org.projectfloodlight.openflow.protocol.match.Match;
+
+import com.google.common.hash.PrimitiveSink;
+
+/**
+ * Collection of helper functions for reading from and writing into ChannelBuffers
+ *
+ * @author capveg
+ */
+
+public class ChannelUtilsVer10 {
+    public static Match readOFMatch(final ChannelBuffer bb) throws OFParseError {
+        return OFMatchV1Ver10.READER.readFrom(bb);
+    }
+
+    public static Set<OFActionType> readSupportedActions(ChannelBuffer bb) {
+        int actions = bb.readInt();
+        EnumSet<OFActionType> supportedActions = EnumSet.noneOf(OFActionType.class);
+        if ((actions & (1 << OFActionTypeSerializerVer10.OUTPUT_VAL)) != 0)
+            supportedActions.add(OFActionType.OUTPUT);
+        if ((actions & (1 << OFActionTypeSerializerVer10.SET_VLAN_VID_VAL)) != 0)
+            supportedActions.add(OFActionType.SET_VLAN_VID);
+        if ((actions & (1 << OFActionTypeSerializerVer10.SET_VLAN_PCP_VAL)) != 0)
+            supportedActions.add(OFActionType.SET_VLAN_PCP);
+        if ((actions & (1 << OFActionTypeSerializerVer10.STRIP_VLAN_VAL)) != 0)
+            supportedActions.add(OFActionType.STRIP_VLAN);
+        if ((actions & (1 << OFActionTypeSerializerVer10.SET_DL_SRC_VAL)) != 0)
+            supportedActions.add(OFActionType.SET_DL_SRC);
+        if ((actions & (1 << OFActionTypeSerializerVer10.SET_DL_DST_VAL)) != 0)
+            supportedActions.add(OFActionType.SET_DL_DST);
+        if ((actions & (1 << OFActionTypeSerializerVer10.SET_NW_SRC_VAL)) != 0)
+            supportedActions.add(OFActionType.SET_NW_SRC);
+        if ((actions & (1 << OFActionTypeSerializerVer10.SET_NW_DST_VAL)) != 0)
+            supportedActions.add(OFActionType.SET_NW_DST);
+        if ((actions & (1 << OFActionTypeSerializerVer10.SET_NW_TOS_VAL)) != 0)
+            supportedActions.add(OFActionType.SET_NW_TOS);
+        if ((actions & (1 << OFActionTypeSerializerVer10.SET_TP_SRC_VAL)) != 0)
+            supportedActions.add(OFActionType.SET_TP_SRC);
+        if ((actions & (1 << OFActionTypeSerializerVer10.SET_TP_DST_VAL)) != 0)
+            supportedActions.add(OFActionType.SET_TP_DST);
+        if ((actions & (1 << OFActionTypeSerializerVer10.ENQUEUE_VAL)) != 0)
+            supportedActions.add(OFActionType.ENQUEUE);
+        return supportedActions;
+    }
+
+    public static int supportedActionsToWire(Set<OFActionType> supportedActions) {
+        int supportedActionsVal = 0;
+        if (supportedActions.contains(OFActionType.OUTPUT))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.OUTPUT_VAL);
+        if (supportedActions.contains(OFActionType.SET_VLAN_VID))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.SET_VLAN_VID_VAL);
+        if (supportedActions.contains(OFActionType.SET_VLAN_PCP))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.SET_VLAN_PCP_VAL);
+        if (supportedActions.contains(OFActionType.STRIP_VLAN))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.STRIP_VLAN_VAL);
+        if (supportedActions.contains(OFActionType.SET_DL_SRC))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.SET_DL_SRC_VAL);
+        if (supportedActions.contains(OFActionType.SET_DL_DST))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.SET_DL_DST_VAL);
+        if (supportedActions.contains(OFActionType.SET_NW_SRC))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.SET_NW_SRC_VAL);
+        if (supportedActions.contains(OFActionType.SET_NW_DST))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.SET_NW_DST_VAL);
+        if (supportedActions.contains(OFActionType.SET_NW_TOS))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.SET_NW_TOS_VAL);
+        if (supportedActions.contains(OFActionType.SET_TP_SRC))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.SET_TP_SRC_VAL);
+        if (supportedActions.contains(OFActionType.SET_TP_DST))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.SET_TP_DST_VAL);
+        if (supportedActions.contains(OFActionType.ENQUEUE))
+            supportedActionsVal |= (1 << OFActionTypeSerializerVer10.ENQUEUE_VAL);
+        return supportedActionsVal;
+    }
+
+    public static void putSupportedActionsTo(Set<OFActionType> supportedActions, PrimitiveSink sink) {
+        sink.putInt(supportedActionsToWire(supportedActions));
+    }
+
+    public static void writeSupportedActions(ChannelBuffer bb, Set<OFActionType> supportedActions) {
+        bb.writeInt(supportedActionsToWire(supportedActions));
+    }
+
+}
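
A round-trip sketch of the bitmap encoding above: the supported-action set is packed into a single int on the wire and recovered by readSupportedActions. OFActionType and the serializer constants come from the generated code; the snippet itself is illustrative only.

    import java.util.EnumSet;
    import java.util.Set;

    import org.jboss.netty.buffer.ChannelBuffer;
    import org.jboss.netty.buffer.ChannelBuffers;
    import org.projectfloodlight.openflow.protocol.OFActionType;
    import org.projectfloodlight.openflow.protocol.ver10.ChannelUtilsVer10;

    public class SupportedActionsExample {
        public static void main(String[] args) {
            Set<OFActionType> supported =
                    EnumSet.of(OFActionType.OUTPUT, OFActionType.SET_VLAN_VID, OFActionType.ENQUEUE);

            // encode the set as the OF1.0 capability bitmap ...
            ChannelBuffer bb = ChannelBuffers.dynamicBuffer();
            ChannelUtilsVer10.writeSupportedActions(bb, supported);

            // ... and decode it back into an equal set
            Set<OFActionType> decoded = ChannelUtilsVer10.readSupportedActions(bb);
            System.out.println(decoded.equals(supported)); // true
        }
    }
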
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver11/ChannelUtilsVer11.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver11/ChannelUtilsVer11.java
new file mode 100644
index 0000000..b090e47
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver11/ChannelUtilsVer11.java
@@ -0,0 +1,26 @@
+package org.projectfloodlight.openflow.protocol.ver11;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFMatchBmap;
+import org.projectfloodlight.openflow.protocol.match.Match;
+
+/**
+ * Collection of helper functions for reading from and writing into ChannelBuffers
+ *
+ * @author capveg
+ */
+
+public class ChannelUtilsVer11 {
+    public static Match readOFMatch(final ChannelBuffer bb) throws OFParseError {
+        return OFMatchV2Ver11.READER.readFrom(bb);
+    }
+
+    public static OFMatchBmap readOFMatchBmap(ChannelBuffer bb) {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    public static void writeOFMatchBmap(ChannelBuffer bb, OFMatchBmap match) {
+        throw new UnsupportedOperationException("not implemented");
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver12/ChannelUtilsVer12.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver12/ChannelUtilsVer12.java
new file mode 100644
index 0000000..756363d
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver12/ChannelUtilsVer12.java
@@ -0,0 +1,40 @@
+package org.projectfloodlight.openflow.protocol.ver12;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFMatchBmap;
+import org.projectfloodlight.openflow.protocol.match.Match;
+import org.projectfloodlight.openflow.protocol.ver12.OFMatchV3Ver12;
+import org.projectfloodlight.openflow.protocol.OFBsnVportQInQ;
+
+/**
+ * Collection of helper functions for reading from and writing into ChannelBuffers
+ *
+ * @author capveg
+ */
+
+public class ChannelUtilsVer12 {
+    public static Match readOFMatch(final ChannelBuffer bb) throws OFParseError {
+        return OFMatchV3Ver12.READER.readFrom(bb);
+    }
+
+    // TODO these need to be figured out / removed
+
+    public static OFBsnVportQInQ readOFBsnVportQInQ(ChannelBuffer bb) {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    public static void writeOFBsnVportQInQ(ChannelBuffer bb,
+            OFBsnVportQInQ vport) {
+        throw new UnsupportedOperationException("not implemented");
+
+    }
+
+    public static OFMatchBmap readOFMatchBmap(ChannelBuffer bb) {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    public static void writeOFMatchBmap(ChannelBuffer bb, OFMatchBmap match) {
+        throw new UnsupportedOperationException("not implemented");
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver13/ChannelUtilsVer13.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver13/ChannelUtilsVer13.java
new file mode 100644
index 0000000..8216bb0
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/protocol/ver13/ChannelUtilsVer13.java
@@ -0,0 +1,26 @@
+package org.projectfloodlight.openflow.protocol.ver13;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFMatchBmap;
+import org.projectfloodlight.openflow.protocol.match.Match;
+
+/**
+ * Collection of helper functions for reading from and writing into ChannelBuffers
+ *
+ * @author capveg
+ */
+
+public class ChannelUtilsVer13 {
+    public static Match readOFMatch(final ChannelBuffer bb) throws OFParseError {
+        return OFMatchV3Ver13.READER.readFrom(bb);
+    }
+
+    public static OFMatchBmap readOFMatchBmap(ChannelBuffer bb) {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    public static void writeOFMatchBmap(ChannelBuffer bb, OFMatchBmap match) {
+        throw new UnsupportedOperationException("not implemented");
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ArpOpcode.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ArpOpcode.java
new file mode 100644
index 0000000..10d8add
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ArpOpcode.java
@@ -0,0 +1,194 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+public class ArpOpcode implements OFValueType<ArpOpcode> {
+
+    final static int LENGTH = 2;
+
+    private static final int VAL_REQUEST   = 1;
+    private static final int VAL_REPLY = 2;
+    private static final int VAL_REQUEST_REVERSE   = 3;
+    private static final int VAL_REPLY_REVERSE = 4;
+    private static final int VAL_DRARP_REQUEST = 5;
+    private static final int VAL_DRARP_REPLY   = 6;
+    private static final int VAL_DRARP_ERROR   = 7;
+    private static final int VAL_INARP_REQUEST = 8;
+    private static final int VAL_INARP_REPLY   = 9;
+    private static final int VAL_ARP_NAK   = 10;
+    private static final int VAL_MARS_REQUEST  = 11;
+    private static final int VAL_MARS_MULTI    = 12;
+    private static final int VAL_MARS_MSERV    = 13;
+    private static final int VAL_MARS_JOIN = 14;
+    private static final int VAL_MARS_LEAVE    = 15;
+    private static final int VAL_MARS_NAK  = 16;
+    private static final int VAL_MARS_UNSERV   = 17;
+    private static final int VAL_MARS_SJOIN    = 18;
+    private static final int VAL_MARS_SLEAVE   = 19;
+    private static final int VAL_MARS_GROUPLIST_REQUEST    = 20;
+    private static final int VAL_MARS_GROUPLIST_REPLY  = 21;
+    private static final int VAL_MARS_REDIRECT_MAP = 22;
+    private static final int VAL_MAPOS_UNARP   = 23;
+    private static final int VAL_OP_EXP1   = 24;
+    private static final int VAL_OP_EXP2   = 25;
+
+    public static final ArpOpcode REQUEST  = new ArpOpcode(VAL_REQUEST);
+    public static final ArpOpcode REPLY    = new ArpOpcode(VAL_REPLY);
+    public static final ArpOpcode REQUEST_REVERSE  = new ArpOpcode(VAL_REQUEST_REVERSE);
+    public static final ArpOpcode REPLY_REVERSE    = new ArpOpcode(VAL_REPLY_REVERSE);
+    public static final ArpOpcode DRARP_REQUEST    = new ArpOpcode(VAL_DRARP_REQUEST);
+    public static final ArpOpcode DRARP_REPLY  = new ArpOpcode(VAL_DRARP_REPLY);
+    public static final ArpOpcode DRARP_ERROR  = new ArpOpcode(VAL_DRARP_ERROR);
+    public static final ArpOpcode INARP_REQUEST    = new ArpOpcode(VAL_INARP_REQUEST);
+    public static final ArpOpcode INARP_REPLY  = new ArpOpcode(VAL_INARP_REPLY);
+    public static final ArpOpcode ARP_NAK  = new ArpOpcode(VAL_ARP_NAK);
+    public static final ArpOpcode MARS_REQUEST = new ArpOpcode(VAL_MARS_REQUEST);
+    public static final ArpOpcode MARS_MULTI   = new ArpOpcode(VAL_MARS_MULTI);
+    public static final ArpOpcode MARS_MSERV   = new ArpOpcode(VAL_MARS_MSERV);
+    public static final ArpOpcode MARS_JOIN    = new ArpOpcode(VAL_MARS_JOIN);
+    public static final ArpOpcode MARS_LEAVE   = new ArpOpcode(VAL_MARS_LEAVE);
+    public static final ArpOpcode MARS_NAK = new ArpOpcode(VAL_MARS_NAK);
+    public static final ArpOpcode MARS_UNSERV  = new ArpOpcode(VAL_MARS_UNSERV);
+    public static final ArpOpcode MARS_SJOIN   = new ArpOpcode(VAL_MARS_SJOIN);
+    public static final ArpOpcode MARS_SLEAVE  = new ArpOpcode(VAL_MARS_SLEAVE);
+    public static final ArpOpcode MARS_GROUPLIST_REQUEST   = new ArpOpcode(VAL_MARS_GROUPLIST_REQUEST);
+    public static final ArpOpcode MARS_GROUPLIST_REPLY = new ArpOpcode(VAL_MARS_GROUPLIST_REPLY);
+    public static final ArpOpcode MARS_REDIRECT_MAP    = new ArpOpcode(VAL_MARS_REDIRECT_MAP);
+    public static final ArpOpcode MAPOS_UNARP  = new ArpOpcode(VAL_MAPOS_UNARP);
+    public static final ArpOpcode OP_EXP1  = new ArpOpcode(VAL_OP_EXP1);
+    public static final ArpOpcode OP_EXP2  = new ArpOpcode(VAL_OP_EXP2);
+
+    private static final int MIN_OPCODE = 0;
+    private static final int MAX_OPCODE = 0xFFFF;
+
+    private static final int NONE_VAL = 0;
+    public static final ArpOpcode NONE = new ArpOpcode(NONE_VAL);
+
+    public static final ArpOpcode NO_MASK = new ArpOpcode(0xFFFFFFFF);
+    public static final ArpOpcode FULL_MASK = new ArpOpcode(0x00000000);
+
+    private final int opcode;
+
+    private ArpOpcode(int opcode) {
+        this.opcode = opcode;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    public int getOpcode() {
+        return this.opcode;
+    }
+
+    public static ArpOpcode of(int opcode) {
+        if (opcode < MIN_OPCODE || opcode > MAX_OPCODE)
+            throw new IllegalArgumentException("Invalid ARP opcode: " + opcode);
+        switch (opcode) {
+            case NONE_VAL:
+                return NONE;
+            case VAL_REQUEST:
+                return REQUEST;
+            case VAL_REPLY:
+                return REPLY;
+            case VAL_REQUEST_REVERSE:
+                return REQUEST_REVERSE;
+            case VAL_REPLY_REVERSE:
+                return REPLY_REVERSE;
+            case VAL_DRARP_REQUEST:
+                return DRARP_REQUEST;
+            case VAL_DRARP_REPLY:
+                return DRARP_REPLY;
+            case VAL_DRARP_ERROR:
+                return DRARP_ERROR;
+            case VAL_INARP_REQUEST:
+                return INARP_REQUEST;
+            case VAL_INARP_REPLY:
+                return INARP_REPLY;
+            case VAL_ARP_NAK:
+                return ARP_NAK;
+            case VAL_MARS_REQUEST:
+                return MARS_REQUEST;
+            case VAL_MARS_MULTI:
+                return MARS_MULTI;
+            case VAL_MARS_MSERV:
+                return MARS_MSERV;
+            case VAL_MARS_JOIN:
+                return MARS_JOIN;
+            case VAL_MARS_LEAVE:
+                return MARS_LEAVE;
+            case VAL_MARS_NAK:
+                return MARS_NAK;
+            case VAL_MARS_UNSERV:
+                return MARS_UNSERV;
+            case VAL_MARS_SJOIN:
+                return MARS_SJOIN;
+            case VAL_MARS_SLEAVE:
+                return MARS_SLEAVE;
+            case VAL_MARS_GROUPLIST_REQUEST:
+                return MARS_GROUPLIST_REQUEST;
+            case VAL_MARS_GROUPLIST_REPLY:
+                return MARS_GROUPLIST_REPLY;
+            case VAL_MARS_REDIRECT_MAP:
+                return MARS_REDIRECT_MAP;
+            case VAL_MAPOS_UNARP:
+                return MAPOS_UNARP;
+            case VAL_OP_EXP1:
+                return OP_EXP1;
+            case VAL_OP_EXP2:
+                return OP_EXP2;
+            default:
+                return new ArpOpcode(opcode);
+        }
+    }
+
+    public void write2Bytes(ChannelBuffer c) {
+        c.writeShort(this.opcode);
+    }
+
+    public static ArpOpcode read2Bytes(ChannelBuffer c) {
+        return ArpOpcode.of(c.readUnsignedShort());
+    }
+
+    @Override
+    public ArpOpcode applyMask(ArpOpcode mask) {
+        return ArpOpcode.of(this.opcode & mask.opcode);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + opcode;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        ArpOpcode other = (ArpOpcode) obj;
+        if (opcode != other.opcode)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int compareTo(ArpOpcode o) {
+        return UnsignedInts.compare(opcode, o.opcode);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putShort((short) this.opcode);
+    }
+
+}
\ No newline at end of file
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ClassId.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ClassId.java
new file mode 100644
index 0000000..7d7c38e
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ClassId.java
@@ -0,0 +1,92 @@
+package org.projectfloodlight.openflow.types;
+
+import javax.annotation.concurrent.Immutable;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+@Immutable
+public class ClassId implements OFValueType<ClassId> {
+    static final int LENGTH = 4;
+
+    private final static int NONE_VAL = 0;
+    public final static ClassId NONE = new ClassId(NONE_VAL);
+
+    private final static int NO_MASK_VAL = 0xFFFFFFFF;
+    public final static ClassId NO_MASK = new ClassId(NO_MASK_VAL);
+    public final static ClassId FULL_MASK = NONE;
+
+    private final int rawValue;
+
+    private ClassId(final int rawValue) {
+        this.rawValue = rawValue;
+    }
+
+    public static ClassId of(final int raw) {
+        if(raw == NONE_VAL)
+            return NONE;
+        else if(raw == NO_MASK_VAL)
+            return NO_MASK;
+        return new ClassId(raw);
+    }
+
+    public int getInt() {
+        return rawValue;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public String toString() {
+        return Integer.toString(rawValue);
+    }
+
+    @Override
+    public ClassId applyMask(ClassId mask) {
+        return ClassId.of(rawValue & mask.rawValue);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + rawValue;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        ClassId other = (ClassId) obj;
+        if (rawValue != other.rawValue)
+            return false;
+        return true;
+    }
+
+    public void write4Bytes(ChannelBuffer c) {
+        c.writeInt(rawValue);
+    }
+
+    public static ClassId read4Bytes(ChannelBuffer c) {
+        return ClassId.of(c.readInt());
+    }
+
+    @Override
+    public int compareTo(ClassId o) {
+        return UnsignedInts.compare(rawValue, o.rawValue);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(rawValue);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/DatapathId.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/DatapathId.java
new file mode 100644
index 0000000..79fa14f
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/DatapathId.java
@@ -0,0 +1,87 @@
+package org.projectfloodlight.openflow.types;
+
+import org.projectfloodlight.openflow.annotations.Immutable;
+import org.projectfloodlight.openflow.util.HexString;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Longs;
+import com.google.common.primitives.UnsignedLongs;
+
+/**
+ * Abstraction of a datapath ID that can be set and/or accessed as either a
+ * long value or a colon-separated string. Immutable.
+ *
+ * @author Rob Vaterlaus <rob.vaterlaus@bigswitch.com>
+ */
+@Immutable
+public class DatapathId implements PrimitiveSinkable, Comparable<DatapathId> {
+
+    public static final DatapathId NONE = new DatapathId(0);
+
+    private final long rawValue;
+
+    private DatapathId(long rawValue) {
+        this.rawValue = rawValue;
+    }
+
+    public static DatapathId of(long rawValue) {
+        return new DatapathId(rawValue);
+    }
+
+    public static DatapathId of(String s) {
+        return new DatapathId(HexString.toLong(s));
+    }
+
+    public static DatapathId of(byte[] bytes) {
+        return new DatapathId(Longs.fromByteArray(bytes));
+    }
+
+    public long getLong() {
+        return rawValue;
+    }
+
+    public U64 getUnsignedLong() {
+        return U64.of(rawValue);
+    }
+
+    public byte[] getBytes() {
+        return Longs.toByteArray(rawValue);
+    }
+
+    @Override
+    public String toString() {
+        return HexString.toHexString(rawValue);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + (int) (rawValue ^ (rawValue >>> 32));
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        DatapathId other = (DatapathId) obj;
+        if (rawValue != other.rawValue)
+            return false;
+        return true;
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putLong(rawValue);
+    }
+
+    @Override
+    public int compareTo(DatapathId o) {
+        return UnsignedLongs.compare(rawValue, o.rawValue);
+    }
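+    // Illustrative usage sketch (documentation only; assumes HexString.toLong
+    // accepts the colon-separated form produced by HexString.toHexString):
+    //   DatapathId dpid = DatapathId.of(1L);
+    //   String s = dpid.toString();          // colon-separated hex string
+    //   assert DatapathId.of(s).equals(dpid) && dpid.getLong() == 1L;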
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/EthType.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/EthType.java
new file mode 100644
index 0000000..c5f4f86
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/EthType.java
@@ -0,0 +1,270 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+
+/**
+ * EtherType field representation.
+ *
+ * @author Yotam Harchol (yotam.harchol@bigswitch.com)
+ */
+public class EthType implements OFValueType<EthType> {
+    static final int LENGTH = 2;
+
+    private final int rawValue;
+
+    static final int VAL_IPv4              = 0x0800; // Internet Protocol version 4 (IPv4)
+    static final int VAL_ARP               = 0x0806; // Address Resolution Protocol (ARP)
+    static final int VAL_WAKE_ON_LAN       = 0x0842; // Wake-on-LAN[3]
+    static final int VAL_TRILL             = 0x22F3; // IETF TRILL Protocol
+    static final int VAL_DECNET_IV         = 0x6003; // DECnet Phase IV
+    static final int VAL_REV_ARP           = 0x8035; // Reverse Address Resolution Protocol
+    static final int VAL_APPLE_TALK        = 0x809B; // AppleTalk (Ethertalk)
+    static final int VAL_APPLE_TALK_ARP    = 0x80F3; // AppleTalk Address Resolution Protocol (AARP)
+    static final int VAL_VLAN_FRAME        = 0x8100; // VLAN-tagged frame (IEEE 802.1Q) & Shortest Path Bridging IEEE 802.1aq[4]
+    static final int VAL_IPX_8137          = 0x8137; // IPX
+    static final int VAL_IPX_8138          = 0x8138; // IPX
+    static final int VAL_QNX               = 0x8204; // QNX Qnet
+    static final int VAL_IPv6              = 0x86DD; // Internet Protocol Version 6 (IPv6)
+    static final int VAL_ETH_FLOW          = 0x8808; // Ethernet flow control
+    static final int VAL_SLOW_PROTOCOLS    = 0x8809; // Slow Protocols (IEEE 802.3)
+    static final int VAL_COBRANET          = 0x8819; // CobraNet
+    static final int VAL_MPLS_UNICAST      = 0x8847; // MPLS unicast
+    static final int VAL_MPLS_MULTICAST    = 0x8848; // MPLS multicast
+    static final int VAL_PPPoE_DISCOVERY   = 0x8863; // PPPoE Discovery Stage
+    static final int VAL_PPPoE_SESSION     = 0x8864; // PPPoE Session Stage
+    static final int VAL_JUMBO_FRAMES      = 0x8870; // Jumbo Frames
+    static final int VAL_HOMEPLUG_10       = 0x887B; // HomePlug 1.0 MME
+    static final int VAL_EAP_OVER_LAN      = 0x888E; // EAP over LAN (IEEE 802.1X)
+    static final int VAL_PROFINET          = 0x8892; // PROFINET Protocol
+    static final int VAL_HYPERSCSI         = 0x889A; // HyperSCSI (SCSI over Ethernet)
+    static final int VAL_ATA_OVER_ETH      = 0x88A2; // ATA over Ethernet
+    static final int VAL_ETHERCAT          = 0x88A4; // EtherCAT Protocol
+    static final int VAL_BRIDGING          = 0x88A8; // Provider Bridging (IEEE 802.1ad) & Shortest Path Bridging IEEE 802.1aq[5]
+    static final int VAL_POWERLINK         = 0x88AB; // Ethernet Powerlink[citation needed]
+    static final int VAL_LLDP              = 0x88CC; // Link Layer Discovery Protocol (LLDP)
+    static final int VAL_SERCOS            = 0x88CD; // SERCOS III
+    static final int VAL_HOMEPLUG_AV       = 0x88E1; // HomePlug AV MME[citation needed]
+    static final int VAL_MRP               = 0x88E3; // Media Redundancy Protocol (IEC62439-2)
+    static final int VAL_MAC_SEC           = 0x88E5; // MAC security (IEEE 802.1AE)
+    static final int VAL_PTP               = 0x88F7; // Precision Time Protocol (IEEE 1588)
+    static final int VAL_CFM               = 0x8902; // IEEE 802.1ag Connectivity Fault Management (CFM) Protocol / ITU-T Recommendation Y.1731 (OAM)
+    static final int VAL_FCoE              = 0x8906; // Fibre Channel over Ethernet (FCoE)
+    static final int VAL_FCoE_INIT         = 0x8914; // FCoE Initialization Protocol
+    static final int VAL_RoCE              = 0x8915; // RDMA over Converged Ethernet (RoCE)
+    static final int VAL_HSR               = 0x892F; // High-availability Seamless Redundancy (HSR)
+    static final int VAL_CONF_TEST         = 0x9000; // Ethernet Configuration Testing Protocol[6]
+    static final int VAL_Q_IN_Q            = 0x9100; // Q-in-Q
+    static final int VAL_LLT               = 0xCAFE; // Veritas Low Latency Transport (LLT)[7] for Veritas Cluster Server
+
+    public static final EthType IPv4               = new EthType(VAL_IPv4);
+    public static final EthType ARP                = new EthType(VAL_ARP);
+    public static final EthType WAKE_ON_LAN        = new EthType(VAL_WAKE_ON_LAN);
+    public static final EthType TRILL              = new EthType(VAL_TRILL);
+    public static final EthType DECNET_IV          = new EthType(VAL_DECNET_IV);
+    public static final EthType REV_ARP            = new EthType(VAL_REV_ARP);
+    public static final EthType APPLE_TALK         = new EthType(VAL_APPLE_TALK);
+    public static final EthType APPLE_TALK_ARP     = new EthType(VAL_APPLE_TALK_ARP);
+    public static final EthType VLAN_FRAME         = new EthType(VAL_VLAN_FRAME);
+    public static final EthType IPX_8137           = new EthType(VAL_IPX_8137);
+    public static final EthType IPX_8138           = new EthType(VAL_IPX_8138);
+    public static final EthType QNX                = new EthType(VAL_QNX);
+    public static final EthType IPv6               = new EthType(VAL_IPv6);
+    public static final EthType ETH_FLOW           = new EthType(VAL_ETH_FLOW);
+    public static final EthType SLOW_PROTOCOLS     = new EthType(VAL_SLOW_PROTOCOLS);
+    public static final EthType COBRANET           = new EthType(VAL_COBRANET);
+    public static final EthType MPLS_UNICAST       = new EthType(VAL_MPLS_UNICAST);
+    public static final EthType MPLS_MULTICAST     = new EthType(VAL_MPLS_MULTICAST);
+    public static final EthType PPPoE_DISCOVERY    = new EthType(VAL_PPPoE_DISCOVERY);
+    public static final EthType PPPoE_SESSION      = new EthType(VAL_PPPoE_SESSION);
+    public static final EthType JUMBO_FRAMES       = new EthType(VAL_JUMBO_FRAMES);
+    public static final EthType HOMEPLUG_10        = new EthType(VAL_HOMEPLUG_10);
+    public static final EthType EAP_OVER_LAN       = new EthType(VAL_EAP_OVER_LAN);
+    public static final EthType PROFINET           = new EthType(VAL_PROFINET);
+    public static final EthType HYPERSCSI          = new EthType(VAL_HYPERSCSI);
+    public static final EthType ATA_OVER_ETH       = new EthType(VAL_ATA_OVER_ETH);
+    public static final EthType ETHERCAT           = new EthType(VAL_ETHERCAT);
+    public static final EthType BRIDGING           = new EthType(VAL_BRIDGING);
+    public static final EthType POWERLINK          = new EthType(VAL_POWERLINK);
+    public static final EthType LLDP               = new EthType(VAL_LLDP);
+    public static final EthType SERCOS             = new EthType(VAL_SERCOS);
+    public static final EthType HOMEPLUG_AV        = new EthType(VAL_HOMEPLUG_AV);
+    public static final EthType MRP                = new EthType(VAL_MRP);
+    public static final EthType MAC_SEC            = new EthType(VAL_MAC_SEC);
+    public static final EthType PTP                = new EthType(VAL_PTP);
+    public static final EthType CFM                = new EthType(VAL_CFM);
+    public static final EthType FCoE               = new EthType(VAL_FCoE);
+    public static final EthType FCoE_INIT          = new EthType(VAL_FCoE_INIT);
+    public static final EthType RoCE               = new EthType(VAL_RoCE);
+    public static final EthType HSR                = new EthType(VAL_HSR);
+    public static final EthType CONF_TEST          = new EthType(VAL_CONF_TEST);
+    public static final EthType Q_IN_Q             = new EthType(VAL_Q_IN_Q);
+    public static final EthType LLT                = new EthType(VAL_LLT);
+
+
+    private static final int NONE_VAL = 0x0;
+    public static final EthType NONE = new EthType(NONE_VAL);
+
+    public static final EthType NO_MASK = new EthType(0xFFFFFFFF);
+    public static final EthType FULL_MASK = new EthType(0x00000000);
+
+    private EthType(int type) {
+        this.rawValue = type;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    public static EthType of(int type) {
+        switch (type) {
+            case NONE_VAL:
+                return NONE;
+            case VAL_IPv4:
+                return IPv4;
+            case VAL_ARP:
+                return ARP;
+            case VAL_WAKE_ON_LAN:
+                return WAKE_ON_LAN;
+            case VAL_TRILL:
+                return TRILL;
+            case VAL_DECNET_IV:
+                return DECNET_IV;
+            case VAL_REV_ARP:
+                return REV_ARP;
+            case VAL_APPLE_TALK:
+                return APPLE_TALK;
+            case VAL_APPLE_TALK_ARP:
+                return APPLE_TALK_ARP;
+            case VAL_VLAN_FRAME:
+                return VLAN_FRAME;
+            case VAL_IPX_8137:
+                return IPX_8137;
+            case VAL_IPX_8138:
+                return IPX_8138;
+            case VAL_QNX:
+                return QNX;
+            case VAL_IPv6:
+                return IPv6;
+            case VAL_ETH_FLOW:
+                return ETH_FLOW;
+            case VAL_SLOW_PROTOCOLS:
+                return SLOW_PROTOCOLS;
+            case VAL_COBRANET:
+                return COBRANET;
+            case VAL_MPLS_UNICAST:
+                return MPLS_UNICAST;
+            case VAL_MPLS_MULTICAST:
+                return MPLS_MULTICAST;
+            case VAL_PPPoE_DISCOVERY:
+                return PPPoE_DISCOVERY;
+            case VAL_PPPoE_SESSION:
+                return PPPoE_SESSION;
+            case VAL_JUMBO_FRAMES:
+                return JUMBO_FRAMES;
+            case VAL_HOMEPLUG_10:
+                return HOMEPLUG_10;
+            case VAL_EAP_OVER_LAN:
+                return EAP_OVER_LAN;
+            case VAL_PROFINET:
+                return PROFINET;
+            case VAL_HYPERSCSI:
+                return HYPERSCSI;
+            case VAL_ATA_OVER_ETH:
+                return ATA_OVER_ETH;
+            case VAL_ETHERCAT:
+                return ETHERCAT;
+            case VAL_BRIDGING:
+                return BRIDGING;
+            case VAL_POWERLINK:
+                return POWERLINK;
+            case VAL_LLDP:
+                return LLDP;
+            case VAL_SERCOS:
+                return SERCOS;
+            case VAL_HOMEPLUG_AV:
+                return HOMEPLUG_AV;
+            case VAL_MRP:
+                return MRP;
+            case VAL_MAC_SEC:
+                return MAC_SEC;
+            case VAL_PTP:
+                return PTP;
+            case VAL_CFM:
+                return CFM;
+            case VAL_FCoE:
+                return FCoE;
+            case VAL_FCoE_INIT:
+                return FCoE_INIT;
+            case VAL_RoCE:
+                return RoCE;
+            case VAL_HSR:
+                return HSR;
+            case VAL_CONF_TEST:
+                return CONF_TEST;
+            case VAL_Q_IN_Q:
+                return Q_IN_Q;
+            case VAL_LLT:
+                return LLT;
+            default:
+                // Unknown EtherType: return an un-interned instance wrapping the raw value
+                return new EthType(type);
+        }
+    }
+
+    @Override
+    public String toString() {
+        return Integer.toHexString(rawValue);
+    }
+
+    public void write2Bytes(ChannelBuffer c) {
+        c.writeShort(this.rawValue);
+    }
+
+    public static EthType read2Bytes(ChannelBuffer c) {
+        return EthType.of(c.readUnsignedShort());
+    }
+
+    @Override
+    public EthType applyMask(EthType mask) {
+        return EthType.of(this.rawValue & mask.rawValue);
+    }
+
+    public int getValue() {
+        return rawValue;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof EthType))
+            return false;
+        EthType o = (EthType)obj;
+        if (o.rawValue != this.rawValue)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 37;
+        int result = 1;
+        result = prime * result + rawValue;
+        return result;
+    }
+
+    @Override
+    public int compareTo(EthType o) {
+        return UnsignedInts.compare(rawValue, o.rawValue);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(rawValue);
+    }
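+    // Illustrative usage sketch (documentation only; "buf" is an assumed
+    // ChannelBuffer positioned at a 16-bit EtherType field):
+    //   EthType type = EthType.read2Bytes(buf);
+    //   if (EthType.IPv4.equals(type)) { /* handle IPv4 payload */ }
+    // of() interns the well-known values, so read2Bytes returns the shared
+    // constants above for known EtherTypes and a fresh instance otherwise.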
+
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/GenTableId.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/GenTableId.java
new file mode 100644
index 0000000..cfa7cdf
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/GenTableId.java
@@ -0,0 +1,93 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+public class GenTableId implements OFValueType<GenTableId>, Comparable<GenTableId> {
+    final static int LENGTH = 2;
+
+    private static final int VALIDATION_MASK = 0xFFFF;
+
+    private static final int ALL_VAL = 0xFFFF;
+    private static final int NONE_VAL = 0x0000;
+    public static final GenTableId NONE = new GenTableId(NONE_VAL);
+
+    public static final GenTableId ALL = new GenTableId(ALL_VAL);
+    public static final GenTableId ZERO = NONE;
+
+    private final int id;
+
+    private GenTableId(int id) {
+        this.id = id;
+    }
+
+    public static GenTableId of(int id) {
+        switch(id) {
+            case NONE_VAL:
+                return NONE;
+            case ALL_VAL:
+                return ALL;
+            default:
+                if ((id & VALIDATION_MASK) != id)
+                    throw new IllegalArgumentException("Illegal Table id value: " + id);
+                return new GenTableId(id);
+        }
+    }
+
+    @Override
+    public String toString() {
+        return "0x" + Integer.toHexString(id);
+    }
+
+    public int getValue() {
+        return id;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    public void write2Bytes(ChannelBuffer c) {
+        c.writeShort(this.id);
+    }
+
+    public static GenTableId read2Bytes(ChannelBuffer c) throws OFParseError {
+        return GenTableId.of(c.readUnsignedShort());
+    }
+
+    @Override
+    public GenTableId applyMask(GenTableId mask) {
+        return GenTableId.of(this.id & mask.id);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof GenTableId))
+            return false;
+        GenTableId other = (GenTableId)obj;
+        if (other.id != this.id)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int prime = 13873;
+        return this.id * prime;
+    }
+
+    @Override
+    public int compareTo(GenTableId other) {
+        return UnsignedInts.compare(this.id, other.id);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putShort((short) id);
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ICMPv4Code.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ICMPv4Code.java
new file mode 100644
index 0000000..6466eee
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ICMPv4Code.java
@@ -0,0 +1,93 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Shorts;
+
+/**
+ *
+ * @author Yotam Harchol (yotam.harchol@bigswitch.com)
+ *
+ */
+public class ICMPv4Code implements OFValueType<ICMPv4Code> {
+
+    final static int LENGTH = 1;
+    final static short MAX_CODE = 0xFF;
+
+    private final short code;
+
+    private static final short NONE_VAL = 0;
+    public static final ICMPv4Code NONE = new ICMPv4Code(NONE_VAL);
+
+    public static final ICMPv4Code NO_MASK = new ICMPv4Code((short)0xFFFF);
+    public static final ICMPv4Code FULL_MASK = new ICMPv4Code((short)0x0000);
+
+    private ICMPv4Code(short code) {
+        this.code = code;
+    }
+
+    public static ICMPv4Code of(short code) {
+        if(code == NONE_VAL)
+            return NONE;
+
+        if (code > MAX_CODE || code < 0)
+            throw new IllegalArgumentException("Illegal ICMPv4 code: " + code);
+        return new ICMPv4Code(code);
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    public short getCode() {
+        return code;
+    }
+
+    public void writeByte(ChannelBuffer c) {
+        c.writeByte(this.code);
+    }
+
+    public static ICMPv4Code readByte(ChannelBuffer c) {
+        return ICMPv4Code.of(c.readUnsignedByte());
+    }
+
+    @Override
+    public ICMPv4Code applyMask(ICMPv4Code mask) {
+        return ICMPv4Code.of((short)(this.code & mask.code));
+    }
+
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + code;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        ICMPv4Code other = (ICMPv4Code) obj;
+        if (code != other.code)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int compareTo(ICMPv4Code o) {
+        return Shorts.compare(code, o.code);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putShort(code);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ICMPv4Type.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ICMPv4Type.java
new file mode 100644
index 0000000..a0aa152
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/ICMPv4Type.java
@@ -0,0 +1,203 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Shorts;
+
+public class ICMPv4Type implements OFValueType<ICMPv4Type> {
+    final static int LENGTH = 1;
+
+    private static final short VAL_ECHO_REPLY    = 0;
+    private static final short VAL_DESTINATION_UNREACHABLE   = 3;
+    private static final short VAL_SOURCE_QUENCH = 4;
+    private static final short VAL_REDIRECT  = 5;
+    private static final short VAL_ALTERNATE_HOST_ADDRESS    = 6;
+    private static final short VAL_ECHO  = 8;
+    private static final short VAL_ROUTER_ADVERTISEMENT  = 9;
+    private static final short VAL_ROUTER_SOLICITATION   = 10;
+    private static final short VAL_TIME_EXCEEDED = 11;
+    private static final short VAL_PARAMETER_PROBLEM = 12;
+    private static final short VAL_TIMESTAMP = 13;
+    private static final short VAL_TIMESTAMP_REPLY   = 14;
+    private static final short VAL_INFORMATION_REQUEST   = 15;
+    private static final short VAL_INFORMATION_REPLY = 16;
+    private static final short VAL_ADDRESS_MASK_REQUEST  = 17;
+    private static final short VAL_ADDRESS_MASK_REPLY    = 18;
+    private static final short VAL_TRACEROUTE    = 30;
+    private static final short VAL_DATAGRAM_CONVERSION_ERROR = 31;
+    private static final short VAL_MOBILE_HOST_REDIRECT  = 32;
+    private static final short VAL_IPV6_WHERE_ARE_YOU    = 33;
+    private static final short VAL_IPV6_I_AM_HERE    = 34;
+    private static final short VAL_MOBILE_REGISTRATION_REQUEST   = 35;
+    private static final short VAL_MOBILE_REGISTRATION_REPLY = 36;
+    private static final short VAL_DOMAIN_NAME_REQUEST   = 37;
+    private static final short VAL_DOMAIN_NAME_REPLY = 38;
+    private static final short VAL_SKIP  = 39;
+    private static final short VAL_PHOTURIS  = 40;
+    private static final short VAL_EXPERIMENTAL_MOBILITY = 41;
+
+    public static final ICMPv4Type ECHO_REPLY   = new ICMPv4Type(VAL_ECHO_REPLY);
+    public static final ICMPv4Type DESTINATION_UNREACHABLE  = new ICMPv4Type(VAL_DESTINATION_UNREACHABLE);
+    public static final ICMPv4Type SOURCE_QUENCH    = new ICMPv4Type(VAL_SOURCE_QUENCH);
+    public static final ICMPv4Type REDIRECT = new ICMPv4Type(VAL_REDIRECT);
+    public static final ICMPv4Type ALTERNATE_HOST_ADDRESS   = new ICMPv4Type(VAL_ALTERNATE_HOST_ADDRESS);
+    public static final ICMPv4Type ECHO = new ICMPv4Type(VAL_ECHO);
+    public static final ICMPv4Type ROUTER_ADVERTISEMENT = new ICMPv4Type(VAL_ROUTER_ADVERTISEMENT);
+    public static final ICMPv4Type ROUTER_SOLICITATION  = new ICMPv4Type(VAL_ROUTER_SOLICITATION);
+    public static final ICMPv4Type TIME_EXCEEDED    = new ICMPv4Type(VAL_TIME_EXCEEDED);
+    public static final ICMPv4Type PARAMETER_PROBLEM    = new ICMPv4Type(VAL_PARAMETER_PROBLEM);
+    public static final ICMPv4Type TIMESTAMP    = new ICMPv4Type(VAL_TIMESTAMP);
+    public static final ICMPv4Type TIMESTAMP_REPLY  = new ICMPv4Type(VAL_TIMESTAMP_REPLY);
+    public static final ICMPv4Type INFORMATION_REQUEST  = new ICMPv4Type(VAL_INFORMATION_REQUEST);
+    public static final ICMPv4Type INFORMATION_REPLY    = new ICMPv4Type(VAL_INFORMATION_REPLY);
+    public static final ICMPv4Type ADDRESS_MASK_REQUEST = new ICMPv4Type(VAL_ADDRESS_MASK_REQUEST);
+    public static final ICMPv4Type ADDRESS_MASK_REPLY   = new ICMPv4Type(VAL_ADDRESS_MASK_REPLY);
+    public static final ICMPv4Type TRACEROUTE   = new ICMPv4Type(VAL_TRACEROUTE);
+    public static final ICMPv4Type DATAGRAM_CONVERSION_ERROR    = new ICMPv4Type(VAL_DATAGRAM_CONVERSION_ERROR);
+    public static final ICMPv4Type MOBILE_HOST_REDIRECT = new ICMPv4Type(VAL_MOBILE_HOST_REDIRECT);
+    public static final ICMPv4Type IPV6_WHERE_ARE_YOU  = new ICMPv4Type(VAL_IPV6_WHERE_ARE_YOU);
+    public static final ICMPv4Type IPV6_I_AM_HERE = new ICMPv4Type(VAL_IPV6_I_AM_HERE);
+    public static final ICMPv4Type MOBILE_REGISTRATION_REQUEST  = new ICMPv4Type(VAL_MOBILE_REGISTRATION_REQUEST);
+    public static final ICMPv4Type MOBILE_REGISTRATION_REPLY    = new ICMPv4Type(VAL_MOBILE_REGISTRATION_REPLY);
+    public static final ICMPv4Type DOMAIN_NAME_REQUEST  = new ICMPv4Type(VAL_DOMAIN_NAME_REQUEST);
+    public static final ICMPv4Type DOMAIN_NAME_REPLY    = new ICMPv4Type(VAL_DOMAIN_NAME_REPLY);
+    public static final ICMPv4Type SKIP = new ICMPv4Type(VAL_SKIP);
+    public static final ICMPv4Type PHOTURIS = new ICMPv4Type(VAL_PHOTURIS);
+    public static final ICMPv4Type EXPERIMENTAL_MOBILITY    = new ICMPv4Type(VAL_EXPERIMENTAL_MOBILITY);
+
+    // HACK alert - we're appropriating ECHO_REPLY (value 0) as 'none' as well
+    public static final ICMPv4Type NONE   = ECHO_REPLY;
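+    // As a consequence, ICMPv4Type.of((short) 0) and ICMPv4Type.NONE resolve to the
+    // same shared ECHO_REPLY instance, so 'none' cannot be distinguished from a
+    // genuine echo reply via equals() or identity.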
+
+    public static final ICMPv4Type NO_MASK = new ICMPv4Type((short)0xFFFF);
+    public static final ICMPv4Type FULL_MASK = new ICMPv4Type((short)0x0000);
+
+    private final short type;
+
+    private static final int MIN_TYPE = 0;
+    private static final int MAX_TYPE = 0xFF;
+
+    private ICMPv4Type(short type) {
+        this.type = type;
+    }
+
+    public static ICMPv4Type of(short type) {
+        if (type < MIN_TYPE || type > MAX_TYPE)
+            throw new IllegalArgumentException("Invalid ICMPv4 type: " + type);
+        switch (type) {
+            case VAL_ECHO_REPLY:
+                return ECHO_REPLY;
+            case VAL_DESTINATION_UNREACHABLE:
+                return DESTINATION_UNREACHABLE;
+            case VAL_SOURCE_QUENCH:
+                return SOURCE_QUENCH;
+            case VAL_REDIRECT:
+                return REDIRECT;
+            case VAL_ALTERNATE_HOST_ADDRESS:
+                return ALTERNATE_HOST_ADDRESS;
+            case VAL_ECHO:
+                return ECHO;
+            case VAL_ROUTER_ADVERTISEMENT:
+                return ROUTER_ADVERTISEMENT;
+            case VAL_ROUTER_SOLICITATION:
+                return ROUTER_SOLICITATION;
+            case VAL_TIME_EXCEEDED:
+                return TIME_EXCEEDED;
+            case VAL_PARAMETER_PROBLEM:
+                return PARAMETER_PROBLEM;
+            case VAL_TIMESTAMP:
+                return TIMESTAMP;
+            case VAL_TIMESTAMP_REPLY:
+                return TIMESTAMP_REPLY;
+            case VAL_INFORMATION_REQUEST:
+                return INFORMATION_REQUEST;
+            case VAL_INFORMATION_REPLY:
+                return INFORMATION_REPLY;
+            case VAL_ADDRESS_MASK_REQUEST:
+                return ADDRESS_MASK_REQUEST;
+            case VAL_ADDRESS_MASK_REPLY:
+                return ADDRESS_MASK_REPLY;
+            case VAL_TRACEROUTE:
+                return TRACEROUTE;
+            case VAL_DATAGRAM_CONVERSION_ERROR:
+                return DATAGRAM_CONVERSION_ERROR;
+            case VAL_MOBILE_HOST_REDIRECT:
+                return MOBILE_HOST_REDIRECT;
+            case VAL_IPV6_WHERE_ARE_YOU:
+                return IPV6_WHERE_ARE_YOU;
+            case VAL_IPV6_I_AM_HERE:
+                return IPV6_I_AM_HERE;
+            case VAL_MOBILE_REGISTRATION_REQUEST:
+                return MOBILE_REGISTRATION_REQUEST;
+            case VAL_MOBILE_REGISTRATION_REPLY:
+                return MOBILE_REGISTRATION_REPLY;
+            case VAL_DOMAIN_NAME_REQUEST:
+                return DOMAIN_NAME_REQUEST;
+            case VAL_DOMAIN_NAME_REPLY:
+                return DOMAIN_NAME_REPLY;
+            case VAL_SKIP:
+                return SKIP;
+            case VAL_PHOTURIS:
+                return PHOTURIS;
+            case VAL_EXPERIMENTAL_MOBILITY:
+                return EXPERIMENTAL_MOBILITY;
+            default:
+                return new ICMPv4Type(type);
+        }
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    public short getType() {
+        return type;
+    }
+
+    public void writeByte(ChannelBuffer c) {
+        c.writeByte(this.type);
+    }
+
+    public static ICMPv4Type readByte(ChannelBuffer c) {
+        return ICMPv4Type.of(c.readUnsignedByte());
+    }
+
+    @Override
+    public ICMPv4Type applyMask(ICMPv4Type mask) {
+        return ICMPv4Type.of((short)(this.type & mask.type));
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + type;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        ICMPv4Type other = (ICMPv4Type) obj;
+        if (type != other.type)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int compareTo(ICMPv4Type o) {
+        return Shorts.compare(type, o.type);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putShort(type);
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPAddress.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPAddress.java
new file mode 100644
index 0000000..c96be83
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPAddress.java
@@ -0,0 +1,41 @@
+package org.projectfloodlight.openflow.types;
+
+public abstract class IPAddress<F extends IPAddress<F>> implements OFValueType<F> {
+
+    public abstract IPVersion getIpVersion();
+
+    /**
+     * Checks if this IPAddress represents a valid CIDR style netmask, i.e.,
+     * it has a set of leading "1" bits followed by only "0" bits
+     * @return true if this represents a valid CIDR style netmask, false
+     * otherwise
+     */
+    public abstract boolean isCidrMask();
+
+    /**
+     * If this IPAddress represents a valid CIDR style netmask (see
+     * isCidrMask()) returns the length of the prefix (the number of "1" bits).
+     * @return length of CIDR mask if this represents a valid CIDR mask
+     * @throws IllegalStateException if isCidrMask() == false
+     */
+    public abstract int asCidrMaskLength();
+
+    @Override
+    public abstract boolean equals(Object other);
+
+    @Override
+    public abstract int hashCode();
+
+    public static IPAddress<?> of(String ip) {
+        if (ip == null) {
+            throw new NullPointerException("String ip must not be null");
+        }
+        if (ip.indexOf('.') != -1)
+            return IPv4Address.of(ip);
+        else if (ip.indexOf(':') != -1)
+            return IPv6Address.of(ip);
+        else
+            throw new IllegalArgumentException("IP Address not well formed: " + ip);
+    }
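+    // Illustrative usage sketch (documentation only):
+    //   IPAddress<?> v4 = IPAddress.of("10.0.0.1");   // dispatches to IPv4Address.of
+    //   IPAddress<?> v6 = IPAddress.of("fe80::1");    // dispatches to IPv6Address.of
+    //   IPv4Address.of("255.255.255.0").isCidrMask(); // true; asCidrMaskLength() == 24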
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPAddressWithMask.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPAddressWithMask.java
new file mode 100644
index 0000000..2087ab4
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPAddressWithMask.java
@@ -0,0 +1,41 @@
+package org.projectfloodlight.openflow.types;
+
+
+public abstract class IPAddressWithMask<F extends IPAddress<F>> extends Masked<F> {
+
+    protected IPAddressWithMask(F value, F mask) {
+        super(value, mask);
+    }
+
+    public abstract IPVersion getIpVersion();
+
+    public static IPAddressWithMask<?> of(String ip) {
+        if (ip == null) {
+            throw new NullPointerException("String ip must not be null");
+        }
+        if (ip.indexOf('.') != -1)
+            return IPv4AddressWithMask.of(ip);
+        else if (ip.indexOf(':') != -1)
+            return IPv6AddressWithMask.of(ip);
+        else
+            throw new IllegalArgumentException("IP Address not well formed: " + ip);
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder res = new StringBuilder();
+        res.append(value.toString());
+
+        res.append('/');
+        if (mask.isCidrMask()) {
+            // CIDR notation
+            res.append(mask.asCidrMaskLength());
+        } else {
+            // Full address mask
+            res.append(mask.toString());
+        }
+
+        return res.toString();
+    }
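+    // Illustrative toString() output (documentation only):
+    //   IPv4AddressWithMask.of("10.0.0.0/8")          ->  "10.0.0.0/8"
+    //   IPv4AddressWithMask.of("1.2.3.4/255.0.255.0") ->  "1.2.3.4/255.0.255.0"
+    // CIDR-style masks are rendered as a prefix length; all other masks in full.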
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPVersion.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPVersion.java
new file mode 100644
index 0000000..5bfc6d8
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPVersion.java
@@ -0,0 +1,6 @@
+package org.projectfloodlight.openflow.types;
+
+public enum IPVersion {
+    IPv4,
+    IPv6
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv4Address.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv4Address.java
new file mode 100644
index 0000000..a9da637
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv4Address.java
@@ -0,0 +1,199 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+
+
+/**
+ * Wrapper around an IPv4 address.
+ *
+ * @author Andreas Wundsam <andreas.wundsam@bigswitch.com>
+ */
+public class IPv4Address extends IPAddress<IPv4Address> {
+    static final int LENGTH = 4;
+    private final int rawValue;
+
+    private static final int NOT_A_CIDR_MASK = -1;
+    private static final int CIDR_MASK_CACHE_UNSET = -2;
+    // Must appear before the static IPv4Address constant assignments
+    private volatile int cidrMaskLengthCache = CIDR_MASK_CACHE_UNSET;
+
+    private final static int NONE_VAL = 0x0;
+    public final static IPv4Address NONE = new IPv4Address(NONE_VAL);
+
+    public static final IPv4Address NO_MASK = IPv4Address.of(0xFFFFFFFF);
+    public static final IPv4Address FULL_MASK = IPv4Address.of(0x00000000);
+
+    private IPv4Address(final int rawValue) {
+        this.rawValue = rawValue;
+    }
+
+    @Override
+    public IPVersion getIpVersion() {
+        return IPVersion.IPv4;
+    }
+
+    private int asCidrMaskLengthInternal() {
+        if (cidrMaskLengthCache == CIDR_MASK_CACHE_UNSET) {
+            // No lock required. We only write cidrMaskLengthCache once
+            int maskint = getInt();
+            if (maskint == 0) {
+                cidrMaskLengthCache = 0;
+            } else if (Integer.bitCount((~maskint) + 1) == 1) {
+                // IP represents a true CIDR prefix length
+                cidrMaskLengthCache = Integer.bitCount(maskint);
+            } else {
+                cidrMaskLengthCache = NOT_A_CIDR_MASK;
+            }
+        }
+        return cidrMaskLengthCache;
+    }
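+    // Worked example of the check above: for the mask 255.255.255.0 the raw value
+    // is 0xFFFFFF00; (~0xFFFFFF00) + 1 == 0x00000100 has exactly one bit set, so it
+    // is a CIDR prefix, and Integer.bitCount(0xFFFFFF00) == 24 is the prefix length.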
+
+    @Override
+    public boolean isCidrMask() {
+        return asCidrMaskLengthInternal() != NOT_A_CIDR_MASK;
+    }
+
+    @Override
+    public int asCidrMaskLength() {
+        if (!isCidrMask()) {
+            throw new IllegalStateException("IP is not a valid CIDR prefix " +
+                    "mask " + toString());
+        } else {
+            return asCidrMaskLengthInternal();
+        }
+    }
+
+    public static IPv4Address of(final byte[] address) {
+        if (address == null) {
+            throw new NullPointerException("Address must not be null");
+        }
+        if (address.length != LENGTH) {
+            throw new IllegalArgumentException(
+                    "Invalid byte array length for IPv4Address address: " + address.length);
+        }
+
+        int raw =
+                (address[0] & 0xFF) << 24 | (address[1] & 0xFF) << 16
+                        | (address[2] & 0xFF) << 8 | (address[3] & 0xFF) << 0;
+        return IPv4Address.of(raw);
+    }
+
+    public static IPv4Address of(final int raw) {
+        if(raw == NONE_VAL)
+            return NONE;
+        return new IPv4Address(raw);
+    }
+
+    public static IPv4Address of(final String string) {
+        if (string == null) {
+            throw new NullPointerException("String must not be null");
+        }
+        int start = 0;
+        int shift = 24;
+
+        int raw = 0;
+        while (shift >= 0) {
+            int end = string.indexOf('.', start);
+            if (end == start || !((shift > 0) ^ (end < 0)))
+                throw new IllegalArgumentException("IP Address not well formed: " + string);
+
+            String substr =
+                    end > 0 ? string.substring(start, end) : string.substring(start);
+            int val = Integer.parseInt(substr);
+            if (val < 0 || val > 255)
+                throw new IllegalArgumentException("IP Address not well formed: " + string);
+
+            raw |= val << shift;
+
+            shift -= 8;
+            start = end + 1;
+        }
+        return IPv4Address.of(raw);
+    }
+
+    public int getInt() {
+        return rawValue;
+    }
+
+    volatile byte[] bytesCache = null;
+
+    public byte[] getBytes() {
+        if (bytesCache == null) {
+            synchronized (this) {
+                if (bytesCache == null) {
+                    bytesCache =
+                            new byte[] { (byte) ((rawValue >>> 24) & 0xFF),
+                                    (byte) ((rawValue >>> 16) & 0xFF),
+                                    (byte) ((rawValue >>> 8) & 0xFF),
+                                    (byte) ((rawValue >>> 0) & 0xFF) };
+                }
+            }
+        }
+        return bytesCache;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder res = new StringBuilder();
+        res.append((rawValue >> 24) & 0xFF).append('.');
+        res.append((rawValue >> 16) & 0xFF).append('.');
+        res.append((rawValue >> 8) & 0xFF).append('.');
+        res.append((rawValue >> 0) & 0xFF);
+        return res.toString();
+    }
+
+    public void write4Bytes(ChannelBuffer c) {
+        c.writeInt(rawValue);
+    }
+
+    public static IPv4Address read4Bytes(ChannelBuffer c) {
+        return IPv4Address.of(c.readInt());
+    }
+
+    @Override
+    public IPv4Address applyMask(IPv4Address mask) {
+        return IPv4Address.of(this.rawValue & mask.rawValue);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + rawValue;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        IPv4Address other = (IPv4Address) obj;
+        if (rawValue != other.rawValue)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int compareTo(IPv4Address o) {
+        return UnsignedInts.compare(rawValue, o.rawValue);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(rawValue);
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv4AddressWithMask.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv4AddressWithMask.java
new file mode 100644
index 0000000..9b60c6a
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv4AddressWithMask.java
@@ -0,0 +1,84 @@
+package org.projectfloodlight.openflow.types;
+
+
+public class IPv4AddressWithMask extends IPAddressWithMask<IPv4Address> {
+    public final static IPv4AddressWithMask NONE = of(IPv4Address.NONE, IPv4Address.NONE);
+
+    private IPv4AddressWithMask(int rawValue, int rawMask) {
+        super(IPv4Address.of(rawValue), IPv4Address.of(rawMask));
+    }
+
+    private IPv4AddressWithMask(IPv4Address value, IPv4Address mask) {
+        super(value, mask);
+    }
+
+    @Override
+    public IPVersion getIpVersion() {
+        return IPVersion.IPv4;
+    }
+
+    public static IPv4AddressWithMask of(int rawValue, int rawMask) {
+        return new IPv4AddressWithMask(rawValue, rawMask);
+    }
+
+    public static IPv4AddressWithMask of(IPv4Address value, IPv4Address mask) {
+        if (value == null) {
+            throw new NullPointerException("Value must not be null");
+        }
+        if (mask == null) {
+            throw new NullPointerException("Mask must not be null");
+        }
+        return new IPv4AddressWithMask(value, mask);
+    }
+
+    public static IPv4AddressWithMask of(final String string) {
+        if (string == null) {
+            throw new NullPointerException("String must not be null");
+        }
+        int slashPos;
+        String ip = string;
+        int maskBits = 32;
+        IPv4Address maskAddress = null;
+
+        // Read mask suffix
+        if ((slashPos = string.indexOf('/')) != -1) {
+            ip = string.substring(0, slashPos);
+            try {
+                String suffix = string.substring(slashPos + 1);
+                if (suffix.length() == 0)
+                    throw new IllegalArgumentException("IP Address not well formed: " + string);
+                if (suffix.indexOf('.') != -1) {
+                    // Full mask
+                    maskAddress = IPv4Address.of(suffix);
+                } else {
+                    // CIDR Suffix
+                    maskBits = Integer.parseInt(suffix);
+                }
+            } catch (NumberFormatException e) {
+                throw new IllegalArgumentException("IP Address not well formed: " + string);
+            }
+            if (maskBits < 0 || maskBits > 32) {
+                throw new IllegalArgumentException("IP Address not well formed: " + string);
+            }
+        }
+
+        // Read IP
+        IPv4Address ipv4 = IPv4Address.of(ip);
+
+        if (maskAddress != null) {
+            // Full address mask
+            return IPv4AddressWithMask.of(ipv4, maskAddress);
+        } else if (maskBits == 32) {
+            // No mask suffix or /32: exact match, all mask bits set
+            return IPv4AddressWithMask.of(ipv4, IPv4Address.NO_MASK);
+        } else if (maskBits == 0) {
+            // /0: fully wildcarded, no mask bits set
+            return IPv4AddressWithMask.of(ipv4, IPv4Address.FULL_MASK);
+        } else {
+            // With mask
+            int mask = (-1) << (32 - maskBits);
+            return IPv4AddressWithMask.of(ipv4, IPv4Address.of(mask));
+        }
+    }
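+    // Illustrative parse results (documentation only):
+    //   of("192.168.0.0/24")      -> value 192.168.0.0, mask 255.255.255.0
+    //   of("1.2.3.4/255.0.255.0") -> value 1.2.3.4,     mask 255.0.255.0
+    //   of("10.1.2.3")            -> value 10.1.2.3,    mask NO_MASK (exact match)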
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv6Address.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv6Address.java
new file mode 100644
index 0000000..4e7b856
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv6Address.java
@@ -0,0 +1,379 @@
+package org.projectfloodlight.openflow.types;
+
+import java.util.regex.Pattern;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Longs;
+
+/**
+ * IPv6 address object. Instance controlled, immutable. Internal representation:
+ * two 64 bit longs (not that you'd have to know).
+ *
+ * @author Andreas Wundsam <andreas.wundsam@teleteach.de>
+ */
+public class IPv6Address extends IPAddress<IPv6Address> {
+    static final int LENGTH = 16;
+    private final long raw1;
+    private final long raw2;
+
+    private static final int NOT_A_CIDR_MASK = -1;
+    private static final int CIDR_MASK_CACHE_UNSET = -2;
+    // Must appear before the static IPv6Address constant assignments
+    private volatile int cidrMaskLengthCache = CIDR_MASK_CACHE_UNSET;
+
+    private final static long NONE_VAL1 = 0x0L;
+    private final static long NONE_VAL2 = 0x0L;
+    public static final IPv6Address NONE = new IPv6Address(NONE_VAL1, NONE_VAL2);
+
+
+    public static final IPv6Address NO_MASK = IPv6Address.of(0xFFFFFFFFFFFFFFFFl, 0xFFFFFFFFFFFFFFFFl);
+    public static final IPv6Address FULL_MASK = IPv6Address.of(0x0, 0x0);
+
+    private IPv6Address(final long raw1, final long raw2) {
+        this.raw1 = raw1;
+        this.raw2 = raw2;
+    }
+
+    @Override
+    public IPVersion getIpVersion() {
+        return IPVersion.IPv6;
+    }
+
+
+    private int computeCidrMask64(long raw) {
+        long mask = raw;
+        if (raw == 0)
+            return 0;
+        else if (Long.bitCount((~mask) + 1) == 1) {
+            // represent a true CIDR prefix length
+            return Long.bitCount(mask);
+        }
+        else {
+            // Not a true prefix
+            return NOT_A_CIDR_MASK;
+        }
+    }
+
+    private int asCidrMaskLengthInternal() {
+        if (cidrMaskLengthCache == CIDR_MASK_CACHE_UNSET) {
+            // No synchronization needed. Writing cidrMaskLengthCache only once
+            if (raw1 == 0 && raw2 == 0) {
+                cidrMaskLengthCache = 0;
+            } else if (raw1 == -1L) {
+                // top half is all 1 bits
+                int tmpLength = computeCidrMask64(raw2);
+                if (tmpLength != NOT_A_CIDR_MASK)
+                    tmpLength += 64;
+                cidrMaskLengthCache = tmpLength;
+            } else if (raw2 == 0) {
+                cidrMaskLengthCache = computeCidrMask64(raw1);
+            } else {
+                cidrMaskLengthCache = NOT_A_CIDR_MASK;
+            }
+        }
+        return cidrMaskLengthCache;
+    }
+
+    @Override
+    public boolean isCidrMask() {
+        return asCidrMaskLengthInternal() != NOT_A_CIDR_MASK;
+    }
+
+    @Override
+    public int asCidrMaskLength() {
+        if (!isCidrMask()) {
+            throw new IllegalStateException("IP is not a valid CIDR prefix " +
+                    "mask " + toString());
+        } else {
+            return asCidrMaskLengthInternal();
+        }
+    }
+
+    public static IPv6Address of(final byte[] address) {
+        if (address == null) {
+            throw new NullPointerException("Address must not be null");
+        }
+        if (address.length != LENGTH) {
+            throw new IllegalArgumentException(
+                    "Invalid byte array length for IPv6 address: " + address.length);
+        }
+
+        long raw1 =
+                (address[0] & 0xFFL) << 56 | (address[1] & 0xFFL) << 48
+                        | (address[2] & 0xFFL) << 40 | (address[3] & 0xFFL) << 32
+                        | (address[4] & 0xFFL) << 24 | (address[5] & 0xFFL) << 16
+                        | (address[6] & 0xFFL) << 8 | (address[7] & 0xFFL);
+
+        long raw2 =
+                (address[8] & 0xFFL) << 56 | (address[9] & 0xFFL) << 48
+                        | (address[10] & 0xFFL) << 40 | (address[11] & 0xFFL) << 32
+                        | (address[12] & 0xFFL) << 24 | (address[13] & 0xFFL) << 16
+                        | (address[14] & 0xFFL) << 8 | (address[15] & 0xFFL);
+
+        return IPv6Address.of(raw1, raw2);
+    }
+
+    private static class IPv6Builder {
+        private long raw1, raw2;
+
+        public void setUnsignedShortWord(final int i, final int value) {
+            int shift = 48 - (i % 4) * 16;
+
+            if (value < 0 || value > 0xFFFF)
+                throw new IllegalArgumentException("16 bit word must be in [0, 0xFFFF]");
+
+            if (i >= 0 && i < 4)
+                raw1 = raw1 & ~(0xFFFFL << shift) | (value & 0xFFFFL) << shift;
+            else if (i >= 4 && i < 8)
+                raw2 = raw2 & ~(0xFFFFL << shift) | (value & 0xFFFFL) << shift;
+            else
+                throw new IllegalArgumentException("16 bit word index must be in [0,7]");
+        }
+
+        public IPv6Address getIPv6() {
+            return IPv6Address.of(raw1, raw2);
+        }
+    }
+
+    private final static Pattern colonPattern = Pattern.compile(":");
+
+    public static IPv6Address of(final String string) {
+        if (string == null) {
+            throw new NullPointerException("String must not be null");
+        }
+        IPv6Builder builder = new IPv6Builder();
+        String[] parts = colonPattern.split(string, -1);
+
+        int leftWord = 0;
+        int leftIndex = 0;
+
+        boolean hitZeroCompression = false;
+
+        for (leftIndex = 0; leftIndex < parts.length; leftIndex++) {
+            String part = parts[leftIndex];
+            if (part.length() == 0) {
+                // hit empty group of zero compression
+                hitZeroCompression = true;
+                break;
+            }
+            builder.setUnsignedShortWord(leftWord++, Integer.parseInt(part, 16));
+        }
+
+        if (hitZeroCompression) {
+            if (leftIndex == 0) {
+                // if the colon is at the start, two colons must be at the start,
+                // move to the second empty group
+                leftIndex = 1;
+                if (parts.length < 2 || parts[1].length() > 0)
+                    throw new IllegalArgumentException("Malformed IPv6 address: " + string);
+            }
+
+            int rightWord = 7;
+            int rightIndex;
+            for (rightIndex = parts.length - 1; rightIndex > leftIndex; rightIndex--) {
+                String part = parts[rightIndex];
+                if (part.length() == 0)
+                    break;
+                builder.setUnsignedShortWord(rightWord--, Integer.parseInt(part, 16));
+            }
+            if (rightIndex == parts.length - 1) {
+                // if the colon is at the end, two colons must be at the end, move
+                // to the second empty group
+                if (rightIndex < 1 || parts[rightIndex - 1].length() > 0)
+                    throw new IllegalArgumentException("Malformed IPv6 address: " + string);
+                rightIndex--;
+            }
+            if (leftIndex != rightIndex)
+                throw new IllegalArgumentException("Malformed IPv6 address: " + string);
+        } else {
+            if (leftIndex != 8) {
+                throw new IllegalArgumentException("Malformed IPv6 address: " + string);
+            }
+        }
+        return builder.getIPv6();
+    }
+
+    public static IPv6Address of(final long raw1, final long raw2) {
+        if(raw1==NONE_VAL1 && raw2 == NONE_VAL2)
+            return NONE;
+        return new IPv6Address(raw1, raw2);
+    }
+
+    volatile byte[] bytesCache = null;
+
+    public byte[] getBytes() {
+        if (bytesCache == null) {
+            synchronized (this) {
+                if (bytesCache == null) {
+                    bytesCache =
+                            new byte[] { (byte) ((raw1 >> 56) & 0xFF),
+                                    (byte) ((raw1 >> 48) & 0xFF),
+                                    (byte) ((raw1 >> 40) & 0xFF),
+                                    (byte) ((raw1 >> 32) & 0xFF),
+                                    (byte) ((raw1 >> 24) & 0xFF),
+                                    (byte) ((raw1 >> 16) & 0xFF),
+                                    (byte) ((raw1 >> 8) & 0xFF),
+                                    (byte) ((raw1 >> 0) & 0xFF),
+
+                                    (byte) ((raw2 >> 56) & 0xFF),
+                                    (byte) ((raw2 >> 48) & 0xFF),
+                                    (byte) ((raw2 >> 40) & 0xFF),
+                                    (byte) ((raw2 >> 32) & 0xFF),
+                                    (byte) ((raw2 >> 24) & 0xFF),
+                                    (byte) ((raw2 >> 16) & 0xFF),
+                                    (byte) ((raw2 >> 8) & 0xFF),
+                                    (byte) ((raw2 >> 0) & 0xFF) };
+                }
+            }
+        }
+        return bytesCache;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public String toString() {
+        return toString(true, false);
+    }
+
+    public int getUnsignedShortWord(final int i) {
+        if (i >= 0 && i < 4)
+            return (int) ((raw1 >>> (48 - i * 16)) & 0xFFFF);
+        else if (i >= 4 && i < 8)
+            return (int) ((raw2 >>> (48 - (i - 4) * 16)) & 0xFFFF);
+        else
+            throw new IllegalArgumentException("16 bit word index must be in [0,7]");
+    }
+
+    /**
+     * Returns the index of the first word at which IPv6 zero compression should be
+     * applied, or Integer.MAX_VALUE if the address contains no zero words.
+     */
+    public int getZeroCompressStart() {
+        int start = Integer.MAX_VALUE;
+        int maxLength = -1;
+
+        int candidateStart = -1;
+
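+        // scan the eight 16-bit words, tracking the longest run of zero words; on a tie the later run wins (>=)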
+        for (int i = 0; i < 8; i++) {
+            if (candidateStart >= 0) {
+                // currently inside a run of zero words
+                if (getUnsignedShortWord(i) != 0) {
+                    // end of this candidate word
+                    int candidateLength = i - candidateStart;
+                    if (candidateLength >= maxLength) {
+                        start = candidateStart;
+                        maxLength = candidateLength;
+                    }
+                    candidateStart = -1;
+                }
+            } else {
+                // not inside a run of zero words
+                if (getUnsignedShortWord(i) == 0) {
+                    candidateStart = i;
+                }
+            }
+        }
+
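+        // a run of zero words may extend to the end of the address; close it out here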
+        if (candidateStart >= 0) {
+            int candidateLength = 8 - candidateStart;
+            if (candidateLength >= maxLength) {
+                start = candidateStart;
+                maxLength = candidateLength;
+            }
+        }
+
+        return start;
+    }
+
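+    /**
+     * Renders the address; zeroCompression collapses the longest run of zero words
+     * into "::", and leadingZeros pads every group to four hex digits.
+     */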
+    public String toString(final boolean zeroCompression, final boolean leadingZeros) {
+        StringBuilder res = new StringBuilder();
+
+        int compressionStart = zeroCompression ? getZeroCompressStart() : Integer.MAX_VALUE;
+        boolean inCompression = false;
+        boolean colonNeeded = false;
+
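+        // emit the words left to right, inserting "::" once at the compression start and skipping the zero words it covers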
+        for (int i = 0; i < 8; i++) {
+            int word = getUnsignedShortWord(i);
+
+            if (word == 0) {
+                if (inCompression)
+                    continue;
+                else if (i == compressionStart) {
+                    res.append(':').append(':');
+                    inCompression = true;
+                    colonNeeded = false;
+                    continue;
+                }
+            } else {
+                inCompression = false;
+            }
+
+            if (colonNeeded) {
+                res.append(':');
+                colonNeeded = false;
+            }
+
+            res.append(leadingZeros ? String.format("%04x", word)
+                                    : Integer.toString(word, 16));
+            colonNeeded = true;
+        }
+        return res.toString();
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + (int) (raw1 ^ (raw1 >>> 32));
+        result = prime * result + (int) (raw2 ^ (raw2 >>> 32));
+        return result;
+    }
+
+    @Override
+    public boolean equals(final Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        IPv6Address other = (IPv6Address) obj;
+        if (raw1 != other.raw1)
+            return false;
+        if (raw2 != other.raw2)
+            return false;
+        return true;
+    }
+
+    public void write16Bytes(ChannelBuffer c) {
+        c.writeLong(this.raw1);
+        c.writeLong(this.raw2);
+    }
+
+    public static IPv6Address read16Bytes(ChannelBuffer c) throws OFParseError {
+        return IPv6Address.of(c.readLong(), c.readLong());
+    }
+
+    @Override
+    public IPv6Address applyMask(IPv6Address mask) {
+        return IPv6Address.of(this.raw1 & mask.raw1, this.raw2 & mask.raw2);
+    }
+
+    @Override
+    public int compareTo(IPv6Address o) {
+        int res = Longs.compare(raw1, o.raw1);
+        if(res != 0)
+            return res;
+        else
+            return Longs.compare(raw2, o.raw2);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putLong(raw1);
+        sink.putLong(raw2);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv6AddressWithMask.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv6AddressWithMask.java
new file mode 100644
index 0000000..7259c7f
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv6AddressWithMask.java
@@ -0,0 +1,92 @@
+package org.projectfloodlight.openflow.types;
+
+import java.math.BigInteger;
+import java.util.Arrays;
+
+public class IPv6AddressWithMask extends IPAddressWithMask<IPv6Address> {
+    public final static IPv6AddressWithMask NONE = of(IPv6Address.NONE, IPv6Address.NONE);
+
+    private IPv6AddressWithMask(IPv6Address value, IPv6Address mask) {
+        super(value, mask);
+    }
+
+    @Override
+    public IPVersion getIpVersion() {
+        return IPVersion.IPv6;
+    }
+
+    public static IPv6AddressWithMask of(IPv6Address value, IPv6Address mask) {
+        if (value == null) {
+            throw new NullPointerException("Value must not be null");
+        }
+        if (mask == null) {
+            throw new NullPointerException("Mask must not be null");
+        }
+        return new IPv6AddressWithMask(value, mask);
+    }
+
+
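+    /**
+     * Parses an IPv6 address with an optional mask suffix: either a CIDR prefix
+     * length (e.g. "2001:db8::/32") or an explicit mask (e.g. "2001:db8::/ffff:ffff::").
+     */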
+    public static IPv6AddressWithMask of(final String string) {
+        if (string == null) {
+            throw new NullPointerException("String must not be null");
+        }
+        int slashPos;
+        String ip = string;
+        int maskBits = 128;
+        IPv6Address maskAddress = null;
+
+        // Read mask suffix
+        if ((slashPos = string.indexOf('/')) != -1) {
+            ip = string.substring(0, slashPos);
+            try {
+                String suffix = string.substring(slashPos + 1);
+                if (suffix.length() == 0)
+                    throw new IllegalArgumentException("IPv6 Address not well formed: " + string);
+                if (suffix.indexOf(':') != -1) {
+                    // Full mask
+                    maskAddress = IPv6Address.of(suffix);
+                } else {
+                    // CIDR Suffix
+                    maskBits = Integer.parseInt(suffix);
+                }
+            } catch (NumberFormatException e) {
+                throw new IllegalArgumentException("IPv6 Address not well formed: " + string);
+            }
+            if (maskBits < 0 || maskBits > 128) {
+                throw new IllegalArgumentException("IPv6 Address not well formed: " + string);
+            }
+        }
+
+        // Read IP
+        IPv6Address ipv6 = IPv6Address.of(ip);
+
+        if (maskAddress != null) {
+            // Full address mask
+            return IPv6AddressWithMask.of(ipv6, maskAddress);
+        } else if (maskBits == 128) {
+            // No mask
+            return IPv6AddressWithMask.of(ipv6, IPv6Address.NO_MASK);
+        } else if (maskBits == 0) {
+            // Entirely masked out
+            return IPv6AddressWithMask.of(ipv6, IPv6Address.FULL_MASK);
+        } else {
+            // With mask
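+            // Build a value with the top maskBits bits set: (-1) << (128 - maskBits),
+            // then normalize BigInteger's two's-complement byte array to exactly
+            // 16 bytes (shorter arrays are sign-extended with 0xFF, longer ones
+            // trimmed to their leading 16 bytes).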
+            BigInteger mask = BigInteger.ONE.negate().shiftLeft(128 - maskBits);
+            byte[] maskBytesTemp = mask.toByteArray();
+            byte[] maskBytes;
+            if (maskBytesTemp.length < 16) {
+                maskBytes = new byte[16];
+                System.arraycopy(maskBytesTemp, 0, maskBytes, 16 - maskBytesTemp.length, maskBytesTemp.length);
+                Arrays.fill(maskBytes, 0, 16 - maskBytesTemp.length, (byte)(0xFF));
+            } else if (maskBytesTemp.length > 16) {
+                maskBytes = new byte[16];
+                System.arraycopy(maskBytesTemp, 0, maskBytes, 0, maskBytes.length);
+            } else {
+                maskBytes = maskBytesTemp;
+            }
+            return IPv6AddressWithMask.of(ipv6, IPv6Address.of(maskBytes));
+        }
+    }
+
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv6FlowLabel.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv6FlowLabel.java
new file mode 100644
index 0000000..de49b51
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IPv6FlowLabel.java
@@ -0,0 +1,85 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+public class IPv6FlowLabel implements OFValueType<IPv6FlowLabel> {
+
+    static final int LENGTH = 4;
+
+    private final int label;
+
+    private final static int NONE_VAL = 0x0;
+    public static final IPv6FlowLabel NONE = new IPv6FlowLabel(NONE_VAL);
+
+    public static final IPv6FlowLabel NO_MASK = IPv6FlowLabel.of(0xFFFFFFFF);
+    public static final IPv6FlowLabel FULL_MASK = IPv6FlowLabel.of(0x0);
+
+    private IPv6FlowLabel(int label) {
+        this.label = label;
+    }
+
+    public static IPv6FlowLabel of(int label) {
+        if(label == NONE_VAL)
+            return NONE;
+        return new IPv6FlowLabel(label);
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof IPv6FlowLabel))
+            return false;
+        IPv6FlowLabel other = (IPv6FlowLabel)obj;
+        if (other.label != this.label)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 59;
+        int result = 1;
+        result = prime * result + label;
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return Integer.toHexString(label);
+    }
+
+    public void write4Bytes(ChannelBuffer c) {
+        c.writeInt(this.label);
+    }
+
+    public static IPv6FlowLabel read4Bytes(ChannelBuffer c) throws OFParseError {
+        return IPv6FlowLabel.of((int)(c.readUnsignedInt() & 0xFFFFFFFF));
+    }
+
+    @Override
+    public IPv6FlowLabel applyMask(IPv6FlowLabel mask) {
+        return IPv6FlowLabel.of(this.label & mask.label);
+    }
+
+    public int getIPv6FlowLabelValue() {
+        return label;
+    }
+
+    @Override
+    public int compareTo(IPv6FlowLabel o) {
+        return UnsignedInts.compare(label, o.label);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(this.label);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IpDscp.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IpDscp.java
new file mode 100644
index 0000000..27596b7
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IpDscp.java
@@ -0,0 +1,254 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+
+public enum IpDscp implements OFValueType<IpDscp> {
+    DSCP_0((byte)0),
+    DSCP_1((byte)1),
+    DSCP_2((byte)2),
+    DSCP_3((byte)3),
+    DSCP_4((byte)4),
+    DSCP_5((byte)5),
+    DSCP_6((byte)6),
+    DSCP_7((byte)7),
+    DSCP_8((byte)8),
+    DSCP_9((byte)9),
+    DSCP_10((byte)10),
+    DSCP_11((byte)11),
+    DSCP_12((byte)12),
+    DSCP_13((byte)13),
+    DSCP_14((byte)14),
+    DSCP_15((byte)15),
+    DSCP_16((byte)16),
+    DSCP_17((byte)17),
+    DSCP_18((byte)18),
+    DSCP_19((byte)19),
+    DSCP_20((byte)20),
+    DSCP_21((byte)21),
+    DSCP_22((byte)22),
+    DSCP_23((byte)23),
+    DSCP_24((byte)24),
+    DSCP_25((byte)25),
+    DSCP_26((byte)26),
+    DSCP_27((byte)27),
+    DSCP_28((byte)28),
+    DSCP_29((byte)29),
+    DSCP_30((byte)30),
+    DSCP_31((byte)31),
+    DSCP_32((byte)32),
+    DSCP_33((byte)33),
+    DSCP_34((byte)34),
+    DSCP_35((byte)35),
+    DSCP_36((byte)36),
+    DSCP_37((byte)37),
+    DSCP_38((byte)38),
+    DSCP_39((byte)39),
+    DSCP_40((byte)40),
+    DSCP_41((byte)41),
+    DSCP_42((byte)42),
+    DSCP_43((byte)43),
+    DSCP_44((byte)44),
+    DSCP_45((byte)45),
+    DSCP_46((byte)46),
+    DSCP_47((byte)47),
+    DSCP_48((byte)48),
+    DSCP_49((byte)49),
+    DSCP_50((byte)50),
+    DSCP_51((byte)51),
+    DSCP_52((byte)52),
+    DSCP_53((byte)53),
+    DSCP_54((byte)54),
+    DSCP_55((byte)55),
+    DSCP_56((byte)56),
+    DSCP_57((byte)57),
+    DSCP_58((byte)58),
+    DSCP_59((byte)59),
+    DSCP_60((byte)60),
+    DSCP_61((byte)61),
+    DSCP_62((byte)62),
+    DSCP_63((byte)63),
+    DSCP_NO_MASK((byte)0xFF);
+
+    static final int LENGTH = 1;
+
+    public static final IpDscp NONE = DSCP_0;
+
+    public static final IpDscp NO_MASK = DSCP_NO_MASK;
+    public static final IpDscp FULL_MASK = DSCP_0;
+
+    private final byte dscp;
+
+    private IpDscp(byte dscp) {
+        this.dscp = dscp;
+    }
+
+    public static IpDscp of(byte dscp) {
+        switch (dscp) {
+            case 0:
+                return DSCP_0;
+            case 1:
+                return DSCP_1;
+            case 2:
+                return DSCP_2;
+            case 3:
+                return DSCP_3;
+            case 4:
+                return DSCP_4;
+            case 5:
+                return DSCP_5;
+            case 6:
+                return DSCP_6;
+            case 7:
+                return DSCP_7;
+            case 8:
+                return DSCP_8;
+            case 9:
+                return DSCP_9;
+            case 10:
+                return DSCP_10;
+            case 11:
+                return DSCP_11;
+            case 12:
+                return DSCP_12;
+            case 13:
+                return DSCP_13;
+            case 14:
+                return DSCP_14;
+            case 15:
+                return DSCP_15;
+            case 16:
+                return DSCP_16;
+            case 17:
+                return DSCP_17;
+            case 18:
+                return DSCP_18;
+            case 19:
+                return DSCP_19;
+            case 20:
+                return DSCP_20;
+            case 21:
+                return DSCP_21;
+            case 22:
+                return DSCP_22;
+            case 23:
+                return DSCP_23;
+            case 24:
+                return DSCP_24;
+            case 25:
+                return DSCP_25;
+            case 26:
+                return DSCP_26;
+            case 27:
+                return DSCP_27;
+            case 28:
+                return DSCP_28;
+            case 29:
+                return DSCP_29;
+            case 30:
+                return DSCP_30;
+            case 31:
+                return DSCP_31;
+            case 32:
+                return DSCP_32;
+            case 33:
+                return DSCP_33;
+            case 34:
+                return DSCP_34;
+            case 35:
+                return DSCP_35;
+            case 36:
+                return DSCP_36;
+            case 37:
+                return DSCP_37;
+            case 38:
+                return DSCP_38;
+            case 39:
+                return DSCP_39;
+            case 40:
+                return DSCP_40;
+            case 41:
+                return DSCP_41;
+            case 42:
+                return DSCP_42;
+            case 43:
+                return DSCP_43;
+            case 44:
+                return DSCP_44;
+            case 45:
+                return DSCP_45;
+            case 46:
+                return DSCP_46;
+            case 47:
+                return DSCP_47;
+            case 48:
+                return DSCP_48;
+            case 49:
+                return DSCP_49;
+            case 50:
+                return DSCP_50;
+            case 51:
+                return DSCP_51;
+            case 52:
+                return DSCP_52;
+            case 53:
+                return DSCP_53;
+            case 54:
+                return DSCP_54;
+            case 55:
+                return DSCP_55;
+            case 56:
+                return DSCP_56;
+            case 57:
+                return DSCP_57;
+            case 58:
+                return DSCP_58;
+            case 59:
+                return DSCP_59;
+            case 60:
+                return DSCP_60;
+            case 61:
+                return DSCP_61;
+            case 62:
+                return DSCP_62;
+            case 63:
+                return DSCP_63;
+            default:
+                throw new IllegalArgumentException("Illegal IPv4 DSCP value: " + dscp);
+        }
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public String toString() {
+        return Integer.toHexString(dscp);
+    }
+
+    public void writeByte(ChannelBuffer c) {
+        c.writeByte(this.dscp);
+    }
+
+    public static IpDscp readByte(ChannelBuffer c) throws OFParseError {
+        return IpDscp.of((byte)(c.readUnsignedByte()));
+    }
+
+    @Override
+    public IpDscp applyMask(IpDscp mask) {
+        return IpDscp.of((byte)(this.dscp & mask.dscp));
+    }
+
+    public byte getDscpValue() {
+        return dscp;
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putByte(dscp);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IpEcn.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IpEcn.java
new file mode 100644
index 0000000..654df01
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IpEcn.java
@@ -0,0 +1,73 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+
+public enum IpEcn implements OFValueType<IpEcn> {
+    ECN_00((byte)0),
+    ECN_01((byte)1),
+    ECN_10((byte)2),
+    ECN_11((byte)3),
+    ECN_NO_MASK((byte)0xFF);
+
+    public static final IpEcn NONE = ECN_00;
+    public static final IpEcn NO_MASK = ECN_NO_MASK;
+    public static final IpEcn FULL_MASK = ECN_00;
+
+    static final int LENGTH = 1;
+
+    private final byte ecn;
+
+    private IpEcn(byte ecn) {
+        this.ecn = ecn;
+    }
+
+    public static IpEcn of(byte ecn) {
+        switch (ecn) {
+            case 0:
+                return ECN_00;
+            case 1:
+                return ECN_01;
+            case 2:
+                return ECN_10;
+            case 3:
+                return ECN_11;
+            default:
+                throw new IllegalArgumentException("Illegal IP ECN value: " + ecn);
+        }
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public String toString() {
+        // pad single-digit binary values (0 and 1) to two digits
+        return (ecn < 2 ? "0" : "") + Integer.toBinaryString(ecn);
+    }
+
+    public void writeByte(ChannelBuffer c) {
+        c.writeByte(this.ecn);
+    }
+
+    public static IpEcn readByte(ChannelBuffer c) throws OFParseError {
+        return IpEcn.of((byte)(c.readUnsignedByte()));
+    }
+
+    @Override
+    public IpEcn applyMask(IpEcn mask) {
+        return IpEcn.of((byte)(this.ecn & mask.ecn));
+    }
+
+    public byte getEcnValue() {
+        return ecn;
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putByte(ecn);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IpProtocol.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IpProtocol.java
new file mode 100644
index 0000000..69f497e
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/IpProtocol.java
@@ -0,0 +1,665 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Shorts;
+
+/**
+ * IP-Protocol field representation
+ *
+ * @author Yotam Harchol (yotam.harchol@bigswitch.com)
+ */
+public class IpProtocol implements OFValueType<IpProtocol> {
+
+    static final short MAX_PROTO = 0xFF;
+    static final int LENGTH = 1;
+
+    private final short proto;
+
+    static final short NUM_HOPOPT  = 0x00;
+    static final short NUM_ICMP    = 0x01;
+    static final short NUM_IGMP    = 0x02;
+    static final short NUM_GGP = 0x03;
+    static final short NUM_IPv4    = 0x04;
+    static final short NUM_ST  = 0x05;
+    static final short NUM_TCP = 0x06;
+    static final short NUM_CBT = 0x07;
+    static final short NUM_EGP = 0x08;
+    static final short NUM_IGP = 0x09;
+    static final short NUM_BBN_RCC_MON = 0x0A;
+    static final short NUM_NVP_II  = 0x0B;
+    static final short NUM_PUP = 0x0C;
+    static final short NUM_ARGUS   = 0x0D;
+    static final short NUM_EMCON   = 0x0E;
+    static final short NUM_XNET    = 0x0F;
+    static final short NUM_CHAOS   = 0x10;
+    static final short NUM_UDP = 0x11;
+    static final short NUM_MUX = 0x12;
+    static final short NUM_DCN_MEAS    = 0x13;
+    static final short NUM_HMP = 0x14;
+    static final short NUM_PRM = 0x15;
+    static final short NUM_XNS_IDP = 0x16;
+    static final short NUM_TRUNK_1 = 0x17;
+    static final short NUM_TRUNK_2 = 0x18;
+    static final short NUM_LEAF_1  = 0x19;
+    static final short NUM_LEAF_2  = 0x1A;
+    static final short NUM_RDP = 0x1B;
+    static final short NUM_IRTP    = 0x1C;
+    static final short NUM_ISO_TP4 = 0x1D;
+    static final short NUM_NETBLT  = 0x1E;
+    static final short NUM_MFE_NSP = 0x1F;
+    static final short NUM_MERIT_INP   = 0x20;
+    static final short NUM_DCCP    = 0x21;
+    static final short NUM_3PC = 0x22;
+    static final short NUM_IDPR    = 0x23;
+    static final short NUM_XTP = 0x24;
+    static final short NUM_DDP = 0x25;
+    static final short NUM_IDPR_CMTP   = 0x26;
+    static final short NUM_TP_PP   = 0x27;
+    static final short NUM_IL  = 0x28;
+    static final short NUM_IPv6    = 0x29;
+    static final short NUM_SDRP    = 0x2A;
+    static final short NUM_IPv6_ROUTE  = 0x2B;
+    static final short NUM_IPv6_FRAG   = 0x2C;
+    static final short NUM_IDRP    = 0x2D;
+    static final short NUM_RSVP    = 0x2E;
+    static final short NUM_GRE = 0x2F;
+    static final short NUM_MHRP    = 0x30;
+    static final short NUM_BNA = 0x31;
+    static final short NUM_ESP = 0x32;
+    static final short NUM_AH  = 0x33;
+    static final short NUM_I_NLSP  = 0x34;
+    static final short NUM_SWIPE   = 0x35;
+    static final short NUM_NARP    = 0x36;
+    static final short NUM_MOBILE  = 0x37;
+    static final short NUM_TLSP    = 0x38;
+    static final short NUM_SKIP    = 0x39;
+    static final short NUM_IPv6_ICMP   = 0x3A;
+    static final short NUM_IPv6_NO_NXT = 0x3B;
+    static final short NUM_IPv6_OPTS   = 0x3C;
+    static final short NUM_HOST_INTERNAL   = 0x3D;
+    static final short NUM_CFTP    = 0x3E;
+    static final short NUM_LOCAL_NET   = 0x3F;
+    static final short NUM_SAT_EXPAK   = 0x40;
+    static final short NUM_KRYPTOLAN   = 0x41;
+    static final short NUM_RVD = 0x42;
+    static final short NUM_IPPC    = 0x43;
+    static final short NUM_DIST_FS = 0x44;
+    static final short NUM_SAT_MON = 0x45;
+    static final short NUM_VISA    = 0x46;
+    static final short NUM_IPCV    = 0x47;
+    static final short NUM_CPNX    = 0x48;
+    static final short NUM_CPHB    = 0x49;
+    static final short NUM_WSN = 0x4A;
+    static final short NUM_PVP = 0x4B;
+    static final short NUM_BR_SAT_MON  = 0x4C;
+    static final short NUM_SUN_ND  = 0x4D;
+    static final short NUM_WB_MON  = 0x4E;
+    static final short NUM_WB_EXPAK    = 0x4F;
+    static final short NUM_ISO_IP  = 0x50;
+    static final short NUM_VMTP    = 0x51;
+    static final short NUM_SECURE_VMTP = 0x52;
+    static final short NUM_VINES   = 0x53;
+    static final short NUM_TTP_IPTM = 0x54;
+    static final short NUM_NSFNET_IGP  = 0x55;
+    static final short NUM_DGP = 0x56;
+    static final short NUM_TCF = 0x57;
+    static final short NUM_EIGRP   = 0x58;
+    static final short NUM_OSPF    = 0x59;
+    static final short NUM_Sprite_RPC  = 0x5A;
+    static final short NUM_LARP    = 0x5B;
+    static final short NUM_MTP = 0x5C;
+    static final short NUM_AX_25   = 0x5D;
+    static final short NUM_IPIP    = 0x5E;
+    static final short NUM_MICP    = 0x5F;
+    static final short NUM_SCC_SP  = 0x60;
+    static final short NUM_ETHERIP = 0x61;
+    static final short NUM_ENCAP   = 0x62;
+    static final short NUM_PRIVATE_ENCRYPT = 0x63;
+    static final short NUM_GMTP    = 0x64;
+    static final short NUM_IFMP    = 0x65;
+    static final short NUM_PNNI    = 0x66;
+    static final short NUM_PIM = 0x67;
+    static final short NUM_ARIS    = 0x68;
+    static final short NUM_SCPS    = 0x69;
+    static final short NUM_QNX = 0x6A;
+    static final short NUM_A_N = 0x6B;
+    static final short NUM_IP_COMP = 0x6C;
+    static final short NUM_SNP = 0x6D;
+    static final short NUM_COMPAQ_PEER = 0x6E;
+    static final short NUM_IPX_IN_IP   = 0x6F;
+    static final short NUM_VRRP    = 0x70;
+    static final short NUM_PGM = 0x71;
+    static final short NUM_ZERO_HOP    = 0x72;
+    static final short NUM_L2TP    = 0x73;
+    static final short NUM_DDX = 0x74;
+    static final short NUM_IATP    = 0x75;
+    static final short NUM_STP = 0x76;
+    static final short NUM_SRP = 0x77;
+    static final short NUM_UTI = 0x78;
+    static final short NUM_SMP = 0x79;
+    static final short NUM_SM  = 0x7A;
+    static final short NUM_PTP = 0x7B;
+    static final short NUM_IS_IS_OVER_IPv4 = 0x7C;
+    static final short NUM_FIRE    = 0x7D;
+    static final short NUM_CRTP    = 0x7E;
+    static final short NUM_CRUDP   = 0x7F;
+    static final short NUM_SSCOPMCE    = 0x80;
+    static final short NUM_IPLT    = 0x81;
+    static final short NUM_SPS = 0x82;
+    static final short NUM_PIPE    = 0x83;
+    static final short NUM_SCTP    = 0x84;
+    static final short NUM_FC  = 0x85;
+    static final short NUM_RSVP_E2E_IGNORE = 0x86;
+    static final short NUM_MOBILITY_HEADER = 0x87;
+    static final short NUM_UDP_LITE    = 0x88;
+    static final short NUM_MPLS_IN_IP  = 0x89;
+    static final short NUM_MANET   = 0x8A;
+    static final short NUM_HIP = 0x8B;
+    static final short NUM_SHIM6   = 0x8C;
+
+    public static final IpProtocol HOPOPT = new IpProtocol(NUM_HOPOPT);
+    public static final IpProtocol ICMP = new IpProtocol(NUM_ICMP);
+    public static final IpProtocol IGMP = new IpProtocol(NUM_IGMP);
+    public static final IpProtocol GGP = new IpProtocol(NUM_GGP);
+    public static final IpProtocol IPv4 = new IpProtocol(NUM_IPv4);
+    public static final IpProtocol ST = new IpProtocol(NUM_ST);
+    public static final IpProtocol TCP = new IpProtocol(NUM_TCP);
+    public static final IpProtocol CBT = new IpProtocol(NUM_CBT);
+    public static final IpProtocol EGP = new IpProtocol(NUM_EGP);
+    public static final IpProtocol IGP = new IpProtocol(NUM_IGP);
+    public static final IpProtocol BBN_RCC_MON = new IpProtocol(NUM_BBN_RCC_MON);
+    public static final IpProtocol NVP_II = new IpProtocol(NUM_NVP_II);
+    public static final IpProtocol PUP = new IpProtocol(NUM_PUP);
+    public static final IpProtocol ARGUS = new IpProtocol(NUM_ARGUS);
+    public static final IpProtocol EMCON = new IpProtocol(NUM_EMCON);
+    public static final IpProtocol XNET = new IpProtocol(NUM_XNET);
+    public static final IpProtocol CHAOS = new IpProtocol(NUM_CHAOS);
+    public static final IpProtocol UDP = new IpProtocol(NUM_UDP);
+    public static final IpProtocol MUX = new IpProtocol(NUM_MUX);
+    public static final IpProtocol DCN_MEAS = new IpProtocol(NUM_DCN_MEAS);
+    public static final IpProtocol HMP = new IpProtocol(NUM_HMP);
+    public static final IpProtocol PRM = new IpProtocol(NUM_PRM);
+    public static final IpProtocol XNS_IDP = new IpProtocol(NUM_XNS_IDP);
+    public static final IpProtocol TRUNK_1 = new IpProtocol(NUM_TRUNK_1);
+    public static final IpProtocol TRUNK_2 = new IpProtocol(NUM_TRUNK_2);
+    public static final IpProtocol LEAF_1 = new IpProtocol(NUM_LEAF_1);
+    public static final IpProtocol LEAF_2 = new IpProtocol(NUM_LEAF_2);
+    public static final IpProtocol RDP = new IpProtocol(NUM_RDP);
+    public static final IpProtocol IRTP = new IpProtocol(NUM_IRTP);
+    public static final IpProtocol ISO_TP4 = new IpProtocol(NUM_ISO_TP4);
+    public static final IpProtocol NETBLT = new IpProtocol(NUM_NETBLT);
+    public static final IpProtocol MFE_NSP = new IpProtocol(NUM_MFE_NSP);
+    public static final IpProtocol MERIT_INP = new IpProtocol(NUM_MERIT_INP);
+    public static final IpProtocol DCCP = new IpProtocol(NUM_DCCP);
+    public static final IpProtocol _3PC = new IpProtocol(NUM_3PC);
+    public static final IpProtocol IDPR = new IpProtocol(NUM_IDPR);
+    public static final IpProtocol XTP = new IpProtocol(NUM_XTP);
+    public static final IpProtocol DDP = new IpProtocol(NUM_DDP);
+    public static final IpProtocol IDPR_CMTP = new IpProtocol(NUM_IDPR_CMTP);
+    public static final IpProtocol TP_PP = new IpProtocol(NUM_TP_PP);
+    public static final IpProtocol IL = new IpProtocol(NUM_IL);
+    public static final IpProtocol IPv6 = new IpProtocol(NUM_IPv6);
+    public static final IpProtocol SDRP = new IpProtocol(NUM_SDRP);
+    public static final IpProtocol IPv6_ROUTE = new IpProtocol(NUM_IPv6_ROUTE);
+    public static final IpProtocol IPv6_FRAG = new IpProtocol(NUM_IPv6_FRAG);
+    public static final IpProtocol IDRP = new IpProtocol(NUM_IDRP);
+    public static final IpProtocol RSVP = new IpProtocol(NUM_RSVP);
+    public static final IpProtocol GRE = new IpProtocol(NUM_GRE);
+    public static final IpProtocol MHRP = new IpProtocol(NUM_MHRP);
+    public static final IpProtocol BNA = new IpProtocol(NUM_BNA);
+    public static final IpProtocol ESP = new IpProtocol(NUM_ESP);
+    public static final IpProtocol AH = new IpProtocol(NUM_AH);
+    public static final IpProtocol I_NLSP = new IpProtocol(NUM_I_NLSP);
+    public static final IpProtocol SWIPE = new IpProtocol(NUM_SWIPE);
+    public static final IpProtocol NARP = new IpProtocol(NUM_NARP);
+    public static final IpProtocol MOBILE = new IpProtocol(NUM_MOBILE);
+    public static final IpProtocol TLSP = new IpProtocol(NUM_TLSP);
+    public static final IpProtocol SKIP = new IpProtocol(NUM_SKIP);
+    public static final IpProtocol IPv6_ICMP = new IpProtocol(NUM_IPv6_ICMP);
+    public static final IpProtocol IPv6_NO_NXT = new IpProtocol(NUM_IPv6_NO_NXT);
+    public static final IpProtocol IPv6_OPTS = new IpProtocol(NUM_IPv6_OPTS);
+    public static final IpProtocol HOST_INTERNAL = new IpProtocol(NUM_HOST_INTERNAL);
+    public static final IpProtocol CFTP = new IpProtocol(NUM_CFTP);
+    public static final IpProtocol LOCAL_NET = new IpProtocol(NUM_LOCAL_NET);
+    public static final IpProtocol SAT_EXPAK = new IpProtocol(NUM_SAT_EXPAK);
+    public static final IpProtocol KRYPTOLAN = new IpProtocol(NUM_KRYPTOLAN);
+    public static final IpProtocol RVD = new IpProtocol(NUM_RVD);
+    public static final IpProtocol IPPC = new IpProtocol(NUM_IPPC);
+    public static final IpProtocol DIST_FS = new IpProtocol(NUM_DIST_FS);
+    public static final IpProtocol SAT_MON = new IpProtocol(NUM_SAT_MON);
+    public static final IpProtocol VISA = new IpProtocol(NUM_VISA);
+    public static final IpProtocol IPCV = new IpProtocol(NUM_IPCV);
+    public static final IpProtocol CPNX = new IpProtocol(NUM_CPNX);
+    public static final IpProtocol CPHB = new IpProtocol(NUM_CPHB);
+    public static final IpProtocol WSN = new IpProtocol(NUM_WSN);
+    public static final IpProtocol PVP = new IpProtocol(NUM_PVP);
+    public static final IpProtocol BR_SAT_MON = new IpProtocol(NUM_BR_SAT_MON);
+    public static final IpProtocol SUN_ND = new IpProtocol(NUM_SUN_ND);
+    public static final IpProtocol WB_MON = new IpProtocol(NUM_WB_MON);
+    public static final IpProtocol WB_EXPAK = new IpProtocol(NUM_WB_EXPAK);
+    public static final IpProtocol ISO_IP = new IpProtocol(NUM_ISO_IP);
+    public static final IpProtocol VMTP = new IpProtocol(NUM_VMTP);
+    public static final IpProtocol SECURE_VMTP = new IpProtocol(NUM_SECURE_VMTP);
+    public static final IpProtocol VINES = new IpProtocol(NUM_VINES);
+    public static final IpProtocol TTP_IPTM = new IpProtocol(NUM_TTP_IPTM);
+    public static final IpProtocol NSFNET_IGP = new IpProtocol(NUM_NSFNET_IGP);
+    public static final IpProtocol DGP = new IpProtocol(NUM_DGP);
+    public static final IpProtocol TCF = new IpProtocol(NUM_TCF);
+    public static final IpProtocol EIGRP = new IpProtocol(NUM_EIGRP);
+    public static final IpProtocol OSPF = new IpProtocol(NUM_OSPF);
+    public static final IpProtocol Sprite_RPC = new IpProtocol(NUM_Sprite_RPC);
+    public static final IpProtocol LARP = new IpProtocol(NUM_LARP);
+    public static final IpProtocol MTP = new IpProtocol(NUM_MTP);
+    public static final IpProtocol AX_25 = new IpProtocol(NUM_AX_25);
+    public static final IpProtocol IPIP = new IpProtocol(NUM_IPIP);
+    public static final IpProtocol MICP = new IpProtocol(NUM_MICP);
+    public static final IpProtocol SCC_SP = new IpProtocol(NUM_SCC_SP);
+    public static final IpProtocol ETHERIP = new IpProtocol(NUM_ETHERIP);
+    public static final IpProtocol ENCAP = new IpProtocol(NUM_ENCAP);
+    public static final IpProtocol PRIVATE_ENCRYPT = new IpProtocol(NUM_PRIVATE_ENCRYPT);
+    public static final IpProtocol GMTP = new IpProtocol(NUM_GMTP);
+    public static final IpProtocol IFMP = new IpProtocol(NUM_IFMP);
+    public static final IpProtocol PNNI = new IpProtocol(NUM_PNNI);
+    public static final IpProtocol PIM = new IpProtocol(NUM_PIM);
+    public static final IpProtocol ARIS = new IpProtocol(NUM_ARIS);
+    public static final IpProtocol SCPS = new IpProtocol(NUM_SCPS);
+    public static final IpProtocol QNX = new IpProtocol(NUM_QNX);
+    public static final IpProtocol A_N = new IpProtocol(NUM_A_N);
+    public static final IpProtocol IP_COMP = new IpProtocol(NUM_IP_COMP);
+    public static final IpProtocol SNP = new IpProtocol(NUM_SNP);
+    public static final IpProtocol COMPAQ_PEER = new IpProtocol(NUM_COMPAQ_PEER);
+    public static final IpProtocol IPX_IN_IP = new IpProtocol(NUM_IPX_IN_IP);
+    public static final IpProtocol VRRP = new IpProtocol(NUM_VRRP);
+    public static final IpProtocol PGM = new IpProtocol(NUM_PGM);
+    public static final IpProtocol ZERO_HOP = new IpProtocol(NUM_ZERO_HOP);
+    public static final IpProtocol L2TP = new IpProtocol(NUM_L2TP);
+    public static final IpProtocol DDX = new IpProtocol(NUM_DDX);
+    public static final IpProtocol IATP = new IpProtocol(NUM_IATP);
+    public static final IpProtocol STP = new IpProtocol(NUM_STP);
+    public static final IpProtocol SRP = new IpProtocol(NUM_SRP);
+    public static final IpProtocol UTI = new IpProtocol(NUM_UTI);
+    public static final IpProtocol SMP = new IpProtocol(NUM_SMP);
+    public static final IpProtocol SM = new IpProtocol(NUM_SM);
+    public static final IpProtocol PTP = new IpProtocol(NUM_PTP);
+    public static final IpProtocol IS_IS_OVER_IPv4 = new IpProtocol(NUM_IS_IS_OVER_IPv4);
+    public static final IpProtocol FIRE = new IpProtocol(NUM_FIRE);
+    public static final IpProtocol CRTP = new IpProtocol(NUM_CRTP);
+    public static final IpProtocol CRUDP = new IpProtocol(NUM_CRUDP);
+    public static final IpProtocol SSCOPMCE = new IpProtocol(NUM_SSCOPMCE);
+    public static final IpProtocol IPLT = new IpProtocol(NUM_IPLT);
+    public static final IpProtocol SPS = new IpProtocol(NUM_SPS);
+    public static final IpProtocol PIPE = new IpProtocol(NUM_PIPE);
+    public static final IpProtocol SCTP = new IpProtocol(NUM_SCTP);
+    public static final IpProtocol FC = new IpProtocol(NUM_FC);
+    public static final IpProtocol RSVP_E2E_IGNORE = new IpProtocol(NUM_RSVP_E2E_IGNORE);
+    public static final IpProtocol MOBILITY_HEADER = new IpProtocol(NUM_MOBILITY_HEADER);
+    public static final IpProtocol UDP_LITE = new IpProtocol(NUM_UDP_LITE);
+    public static final IpProtocol MPLS_IN_IP = new IpProtocol(NUM_MPLS_IN_IP);
+    public static final IpProtocol MANET = new IpProtocol(NUM_MANET);
+    public static final IpProtocol HIP = new IpProtocol(NUM_HIP);
+    public static final IpProtocol SHIM6 = new IpProtocol(NUM_SHIM6);
+
+    public static final IpProtocol NONE = HOPOPT;
+
+    public static final IpProtocol NO_MASK = HOPOPT;
+    public static final IpProtocol FULL_MASK = new IpProtocol((short)0x0000);
+
+    private IpProtocol(short version) {
+        this.proto = version;
+    }
+
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    public static IpProtocol of(short proto) {
+        switch (proto) {
+            case NUM_HOPOPT:
+                return HOPOPT;
+            case NUM_ICMP:
+                return ICMP;
+            case NUM_IGMP:
+                return IGMP;
+            case NUM_GGP:
+                return GGP;
+            case NUM_IPv4:
+                return IPv4;
+            case NUM_ST:
+                return ST;
+            case NUM_TCP:
+                return TCP;
+            case NUM_CBT:
+                return CBT;
+            case NUM_EGP:
+                return EGP;
+            case NUM_IGP:
+                return IGP;
+            case NUM_BBN_RCC_MON:
+                return BBN_RCC_MON;
+            case NUM_NVP_II:
+                return NVP_II;
+            case NUM_PUP:
+                return PUP;
+            case NUM_ARGUS:
+                return ARGUS;
+            case NUM_EMCON:
+                return EMCON;
+            case NUM_XNET:
+                return XNET;
+            case NUM_CHAOS:
+                return CHAOS;
+            case NUM_UDP:
+                return UDP;
+            case NUM_MUX:
+                return MUX;
+            case NUM_DCN_MEAS:
+                return DCN_MEAS;
+            case NUM_HMP:
+                return HMP;
+            case NUM_PRM:
+                return PRM;
+            case NUM_XNS_IDP:
+                return XNS_IDP;
+            case NUM_TRUNK_1:
+                return TRUNK_1;
+            case NUM_TRUNK_2:
+                return TRUNK_2;
+            case NUM_LEAF_1:
+                return LEAF_1;
+            case NUM_LEAF_2:
+                return LEAF_2;
+            case NUM_RDP:
+                return RDP;
+            case NUM_IRTP:
+                return IRTP;
+            case NUM_ISO_TP4:
+                return ISO_TP4;
+            case NUM_NETBLT:
+                return NETBLT;
+            case NUM_MFE_NSP:
+                return MFE_NSP;
+            case NUM_MERIT_INP:
+                return MERIT_INP;
+            case NUM_DCCP:
+                return DCCP;
+            case NUM_3PC:
+                return _3PC;
+            case NUM_IDPR:
+                return IDPR;
+            case NUM_XTP:
+                return XTP;
+            case NUM_DDP:
+                return DDP;
+            case NUM_IDPR_CMTP:
+                return IDPR_CMTP;
+            case NUM_TP_PP:
+                return TP_PP;
+            case NUM_IL:
+                return IL;
+            case NUM_IPv6:
+                return IPv6;
+            case NUM_SDRP:
+                return SDRP;
+            case NUM_IPv6_ROUTE:
+                return IPv6_ROUTE;
+            case NUM_IPv6_FRAG:
+                return IPv6_FRAG;
+            case NUM_IDRP:
+                return IDRP;
+            case NUM_RSVP:
+                return RSVP;
+            case NUM_GRE:
+                return GRE;
+            case NUM_MHRP:
+                return MHRP;
+            case NUM_BNA:
+                return BNA;
+            case NUM_ESP:
+                return ESP;
+            case NUM_AH:
+                return AH;
+            case NUM_I_NLSP:
+                return I_NLSP;
+            case NUM_SWIPE:
+                return SWIPE;
+            case NUM_NARP:
+                return NARP;
+            case NUM_MOBILE:
+                return MOBILE;
+            case NUM_TLSP:
+                return TLSP;
+            case NUM_SKIP:
+                return SKIP;
+            case NUM_IPv6_ICMP:
+                return IPv6_ICMP;
+            case NUM_IPv6_NO_NXT:
+                return IPv6_NO_NXT;
+            case NUM_IPv6_OPTS:
+                return IPv6_OPTS;
+            case NUM_HOST_INTERNAL:
+                return HOST_INTERNAL;
+            case NUM_CFTP:
+                return CFTP;
+            case NUM_LOCAL_NET:
+                return LOCAL_NET;
+            case NUM_SAT_EXPAK:
+                return SAT_EXPAK;
+            case NUM_KRYPTOLAN:
+                return KRYPTOLAN;
+            case NUM_RVD:
+                return RVD;
+            case NUM_IPPC:
+                return IPPC;
+            case NUM_DIST_FS:
+                return DIST_FS;
+            case NUM_SAT_MON:
+                return SAT_MON;
+            case NUM_VISA:
+                return VISA;
+            case NUM_IPCV:
+                return IPCV;
+            case NUM_CPNX:
+                return CPNX;
+            case NUM_CPHB:
+                return CPHB;
+            case NUM_WSN:
+                return WSN;
+            case NUM_PVP:
+                return PVP;
+            case NUM_BR_SAT_MON:
+                return BR_SAT_MON;
+            case NUM_SUN_ND:
+                return SUN_ND;
+            case NUM_WB_MON:
+                return WB_MON;
+            case NUM_WB_EXPAK:
+                return WB_EXPAK;
+            case NUM_ISO_IP:
+                return ISO_IP;
+            case NUM_VMTP:
+                return VMTP;
+            case NUM_SECURE_VMTP:
+                return SECURE_VMTP;
+            case NUM_VINES:
+                return VINES;
+            case NUM_TTP_IPTM:
+                return TTP_IPTM;
+            case NUM_NSFNET_IGP:
+                return NSFNET_IGP;
+            case NUM_DGP:
+                return DGP;
+            case NUM_TCF:
+                return TCF;
+            case NUM_EIGRP:
+                return EIGRP;
+            case NUM_OSPF:
+                return OSPF;
+            case NUM_Sprite_RPC:
+                return Sprite_RPC;
+            case NUM_LARP:
+                return LARP;
+            case NUM_MTP:
+                return MTP;
+            case NUM_AX_25:
+                return AX_25;
+            case NUM_IPIP:
+                return IPIP;
+            case NUM_MICP:
+                return MICP;
+            case NUM_SCC_SP:
+                return SCC_SP;
+            case NUM_ETHERIP:
+                return ETHERIP;
+            case NUM_ENCAP:
+                return ENCAP;
+            case NUM_PRIVATE_ENCRYPT:
+                return PRIVATE_ENCRYPT;
+            case NUM_GMTP:
+                return GMTP;
+            case NUM_IFMP:
+                return IFMP;
+            case NUM_PNNI:
+                return PNNI;
+            case NUM_PIM:
+                return PIM;
+            case NUM_ARIS:
+                return ARIS;
+            case NUM_SCPS:
+                return SCPS;
+            case NUM_QNX:
+                return QNX;
+            case NUM_A_N:
+                return A_N;
+            case NUM_IP_COMP:
+                return IP_COMP;
+            case NUM_SNP:
+                return SNP;
+            case NUM_COMPAQ_PEER:
+                return COMPAQ_PEER;
+            case NUM_IPX_IN_IP:
+                return IPX_IN_IP;
+            case NUM_VRRP:
+                return VRRP;
+            case NUM_PGM:
+                return PGM;
+            case NUM_ZERO_HOP:
+                return ZERO_HOP;
+            case NUM_L2TP:
+                return L2TP;
+            case NUM_DDX:
+                return DDX;
+            case NUM_IATP:
+                return IATP;
+            case NUM_STP:
+                return STP;
+            case NUM_SRP:
+                return SRP;
+            case NUM_UTI:
+                return UTI;
+            case NUM_SMP:
+                return SMP;
+            case NUM_SM:
+                return SM;
+            case NUM_PTP:
+                return PTP;
+            case NUM_IS_IS_OVER_IPv4:
+                return IS_IS_OVER_IPv4;
+            case NUM_FIRE:
+                return FIRE;
+            case NUM_CRTP:
+                return CRTP;
+            case NUM_CRUDP:
+                return CRUDP;
+            case NUM_SSCOPMCE:
+                return SSCOPMCE;
+            case NUM_IPLT:
+                return IPLT;
+            case NUM_SPS:
+                return SPS;
+            case NUM_PIPE:
+                return PIPE;
+            case NUM_SCTP:
+                return SCTP;
+            case NUM_FC:
+                return FC;
+            case NUM_RSVP_E2E_IGNORE:
+                return RSVP_E2E_IGNORE;
+            case NUM_MOBILITY_HEADER:
+                return MOBILITY_HEADER;
+            case NUM_UDP_LITE:
+                return UDP_LITE;
+            case NUM_MPLS_IN_IP:
+                return MPLS_IN_IP;
+            case NUM_MANET:
+                return MANET;
+            case NUM_HIP:
+                return HIP;
+            case NUM_SHIM6:
+                return SHIM6;
+            default:
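+                // protocol numbers without a named constant still get a fresh instance;
+                // values of 0xFF (MAX_PROTO) and above are rejected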
+                if (proto >= MAX_PROTO) {
+                    throw new IllegalArgumentException("Illegal IP protocol number: "
+                            + proto);
+                } else {
+                    return new IpProtocol(proto);
+                }
+        }
+    }
+
+    @Override
+    public String toString() {
+        return Integer.toHexString(proto);
+    }
+
+    public void writeByte(ChannelBuffer c) {
+        c.writeByte(this.proto);
+    }
+
+    public static IpProtocol readByte(ChannelBuffer c) {
+        return IpProtocol.of(c.readUnsignedByte());
+    }
+
+    @Override
+    public IpProtocol applyMask(IpProtocol mask) {
+        return IpProtocol.of((short)(this.proto & mask.proto));
+    }
+
+    public short getIpProtocolNumber() {
+        return proto;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof IpProtocol))
+            return false;
+        IpProtocol o = (IpProtocol)obj;
+        if (o.proto != this.proto)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 37;
+        int result = 1;
+        result = prime * result + proto;
+        return result;
+    }
+
+
+    @Override
+    public int compareTo(IpProtocol o) {
+        return Shorts.compare(proto, o.proto);
+    }
+
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putShort(proto);
+    }
+
+}
\ No newline at end of file
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/LagId.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/LagId.java
new file mode 100644
index 0000000..51364e1
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/LagId.java
@@ -0,0 +1,92 @@
+package org.projectfloodlight.openflow.types;
+
+import javax.annotation.concurrent.Immutable;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+@Immutable
+public class LagId implements OFValueType<LagId> {
+    static final int LENGTH = 4;
+    private final int rawValue;
+
+    private final static int NONE_VAL = 0;
+    public final static LagId NONE = new LagId(NONE_VAL);
+
+    private final static int NO_MASK_VAL = 0xFFFFFFFF;
+    public final static LagId NO_MASK = new LagId(NO_MASK_VAL);
+    public final static LagId FULL_MASK = NONE;
+
+    private LagId(final int rawValue) {
+        this.rawValue = rawValue;
+    }
+
+    public static LagId of(final int raw) {
+        if(raw == NONE_VAL)
+            return NONE;
+        else if (raw == NO_MASK_VAL)
+            return NO_MASK;
+        return new LagId(raw);
+    }
+
+    public int getInt() {
+        return rawValue;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + rawValue;
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return Integer.toString(rawValue);
+    }
+
+    @Override
+    public boolean equals(final Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        LagId other = (LagId) obj;
+        if (rawValue != other.rawValue)
+            return false;
+        return true;
+    }
+
+    public void write4Bytes(ChannelBuffer c) {
+        c.writeInt(rawValue);
+    }
+
+    public static LagId read4Bytes(ChannelBuffer c) {
+        return LagId.of(c.readInt());
+    }
+
+    @Override
+    public int compareTo(LagId o) {
+        return UnsignedInts.compare(rawValue, o.rawValue);
+    }
+
+    @Override
+    public LagId applyMask(LagId mask) {
+        return LagId.of(rawValue & mask.rawValue);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(rawValue);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/MacAddress.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/MacAddress.java
new file mode 100644
index 0000000..27d5b66
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/MacAddress.java
@@ -0,0 +1,192 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.util.HexString;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Longs;
+
+/**
+ * Wrapper around a 6 byte mac address.
+ *
+ * @author Andreas Wundsam <andreas.wundsam@bigswitch.com>
+ */
+
+public class MacAddress implements OFValueType<MacAddress> {
+    static final int MacAddrLen = 6;
+    private final long rawValue;
+
+    private final static long NONE_VAL = 0x0L;
+    public static final MacAddress NONE = new MacAddress(NONE_VAL);
+
+    private final static long BROADCAST_VAL = 0x0000FFFFFFFFFFFFL;
+    public static final MacAddress BROADCAST = new MacAddress(BROADCAST_VAL);
+
+    public static final MacAddress NO_MASK = MacAddress.of(0xFFFFFFFFFFFFFFFFL);
+    public static final MacAddress FULL_MASK = MacAddress.of(0x0);
+
+    private static final long LLDP_MAC_ADDRESS_MASK = 0xfffffffffff0L;
+    private static final long LLDP_MAC_ADDRESS_VALUE = 0x0180c2000000L;
+
+    private MacAddress(final long rawValue) {
+        this.rawValue = rawValue;
+    }
+
+    public static MacAddress of(final byte[] address) {
+        if (address.length != MacAddrLen)
+            throw new IllegalArgumentException(
+                    "Mac address byte array must be exactly 6 bytes long; length = " + address.length);
+        long raw =
+                (address[0] & 0xFFL) << 40 | (address[1] & 0xFFL) << 32
+                        | (address[2] & 0xFFL) << 24 | (address[3] & 0xFFL) << 16
+                        | (address[4] & 0xFFL) << 8 | (address[5] & 0xFFL);
+        return MacAddress.of(raw);
+    }
+
+    public static MacAddress of(long raw) {
+        raw &= BROADCAST_VAL;
+        if(raw == NONE_VAL)
+            return NONE;
+        if (raw == BROADCAST_VAL)
+            return BROADCAST;
+        return new MacAddress(raw);
+    }
+
+    public static MacAddress of(final String string) {
+        int index = 0;
+        int shift = 40;
+        final String FORMAT_ERROR = "Mac address is not well-formed. " +
+                "It must consist of 6 hex digit pairs separated by colons: ";
+
+        long raw = 0;
+        if (string.length() != 6 * 2 + 5)
+            throw new IllegalArgumentException(FORMAT_ERROR + string);
+
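+        // consume two hex digits per byte, most significant byte first, expecting ':' between pairs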
+        while (shift >= 0) {
+            int digit1 = Character.digit(string.charAt(index++), 16);
+            int digit2 = Character.digit(string.charAt(index++), 16);
+            if ((digit1 < 0) || (digit2 < 0))
+                throw new IllegalArgumentException(FORMAT_ERROR + string);
+            raw |= ((long) (digit1 << 4 | digit2)) << shift;
+
+            if (shift == 0)
+                break;
+            if (string.charAt(index++) != ':')
+                throw new IllegalArgumentException(FORMAT_ERROR + string);
+            shift -= 8;
+        }
+        return MacAddress.of(raw);
+    }
+
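+    /* Lazily computed byte[] view of the address; the volatile field makes the
+       double-checked locking below safe. */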
+    volatile byte[] bytesCache = null;
+
+    public byte[] getBytes() {
+        if (bytesCache == null) {
+            synchronized (this) {
+                if (bytesCache == null) {
+                    bytesCache =
+                            new byte[] { (byte) ((rawValue >> 40) & 0xFF),
+                                    (byte) ((rawValue >> 32) & 0xFF),
+                                    (byte) ((rawValue >> 24) & 0xFF),
+                                    (byte) ((rawValue >> 16) & 0xFF),
+                                    (byte) ((rawValue >> 8) & 0xFF),
+                                    (byte) ((rawValue >> 0) & 0xFF) };
+                }
+            }
+        }
+        return bytesCache;
+    }
+
+    /**
+     * Returns {@code true} if the MAC address is the broadcast address.
+     * @return {@code true} if the MAC address is the broadcast address.
+     */
+    public boolean isBroadcast() {
+        return this == BROADCAST;
+    }
+
+    /**
+     * Returns {@code true} if the MAC address is a multicast address.
+     * @return {@code true} if the MAC address is a multicast address.
+     */
+    public boolean isMulticast() {
+        if (isBroadcast()) {
+            return false;
+        }
+        return (rawValue & (0x01L << 40)) != 0;
+    }
+
+    /**
+     * Returns {@code true} if the MAC address is an LLDP mac address.
+     * @return {@code true} if the MAC address is an LLDP mac address.
+     */
+    public boolean isLLDPAddress() {
+        return (rawValue & LLDP_MAC_ADDRESS_MASK) == LLDP_MAC_ADDRESS_VALUE;
+    }
+
+    @Override
+    public int getLength() {
+        return MacAddrLen;
+    }
+
+    @Override
+    public String toString() {
+        return HexString.toHexString(rawValue, 6);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + (int) (rawValue ^ (rawValue >>> 32));
+        return result;
+    }
+
+    @Override
+    public boolean equals(final Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        MacAddress other = (MacAddress) obj;
+        if (rawValue != other.rawValue)
+            return false;
+        return true;
+    }
+
+    public long getLong() {
+        return rawValue;
+    }
+
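+    /** Writes the 6-byte MAC address as a 4-byte int followed by a 2-byte short. */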
+    public void write6Bytes(ChannelBuffer c) {
+        c.writeInt((int) (this.rawValue >> 16));
+        c.writeShort((int) this.rawValue & 0xFFFF);
+    }
+
+    public static MacAddress read6Bytes(ChannelBuffer c) throws OFParseError {
+        long raw = c.readUnsignedInt() << 16 | c.readUnsignedShort();
+        return MacAddress.of(raw);
+    }
+
+    @Override
+    public MacAddress applyMask(MacAddress mask) {
+        return MacAddress.of(this.rawValue & mask.rawValue);
+    }
+
+    @Override
+    public int compareTo(MacAddress o) {
+        return Longs.compare(rawValue, o.rawValue);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt((int) (this.rawValue >> 16));
+        sink.putShort((short) (this.rawValue & 0xFFFF));
+    }
+
+
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/Masked.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/Masked.java
new file mode 100644
index 0000000..b5a995d
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/Masked.java
@@ -0,0 +1,97 @@
+package org.projectfloodlight.openflow.types;
+
+import com.google.common.hash.PrimitiveSink;
+
+
+
+public class Masked<T extends OFValueType<T>> implements OFValueType<Masked<T>> {
+    protected final T value;
+
+    /** bitmask of the value. Note: a set (1) bit in this mask means 'match on this value'.
+     *  This is the natural mask representation as in IPv[46] netmasks. It is the inverse of the
+     *  OpenFlow 1.0 'wildcard' meaning.
+     */
+    protected final T mask;
+
+    protected Masked(T value, T mask) {
+        this.value = value.applyMask(mask);
+        this.mask = mask;
+    }
+
+    public T getValue() {
+        return value;
+    }
+
+    public T getMask() {
+        return mask;
+    }
+
+    public static <T extends OFValueType<T>> Masked<T> of(T value, T mask) {
+        return new Masked<T>(value, mask);
+    }
+
+    @Override
+    public int getLength() {
+        return this.value.getLength() + this.mask.getLength();
+    }
+
+    @Override
+    public String toString() {
+        // General representation: value/mask
+        StringBuilder sb = new StringBuilder();
+        sb.append(value.toString()).append('/').append(mask.toString());
+        return sb.toString();
+    }
+
+    /** Determine whether candidate value is matched by this masked value
+     *  (i.e., does the candidate lie in the 'network/range' specified by this masked
+     *  value).
+     *
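+     *  <p>For example (illustrative only, using the MacAddress type from this package):
+     *  <pre>
+     *  Masked&lt;MacAddress&gt; oui =
+     *      Masked.of(MacAddress.of(0x5c16c7000000L), MacAddress.of(0xffffff000000L));
+     *  oui.matches(MacAddress.of(0x5c16c7123456L)); // true
+     *  oui.matches(MacAddress.of(0x001122334455L)); // false
+     *  </pre>
+     *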
+     * @param candidate the candidate value to test
+     * @return true iff the candidate lies in the area specified by this masked
+     *         value.
+     */
+    public boolean matches(T candidate) {
+        // the candidate lies in the area of this masked value if the candidate,
+        // with all bits outside the mask zeroed out, equals this masked value's
+        // value (e.g., our 'network address' for networks)
+        return candidate.applyMask(this.mask).equals(this.value);
+    }
+
+    @Override
+    public Masked<T> applyMask(Masked<T> mask) {
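+        // applying a mask to an already-masked value is treated as a no-op here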
+        return this;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof Masked<?>))
+            return false;
+        Masked<?> mobj = (Masked<?>)obj;
+        return this.value.equals(mobj.value) && this.mask.equals(mobj.mask);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 59;
+        int result = 1;
+        result = prime * result + this.value.hashCode();
+        result = prime * result + this.mask.hashCode();
+        return result;
+    }
+
+    @Override
+    public int compareTo(Masked<T> o) {
+        int res = value.compareTo(o.value);
+        if(res != 0)
+            return res;
+        else
+            return mask.compareTo(o.mask);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        value.putTo(sink);
+        mask.putTo(sink);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFAuxId.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFAuxId.java
new file mode 100644
index 0000000..c8e04d2
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFAuxId.java
@@ -0,0 +1,86 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Shorts;
+
+public class OFAuxId implements Comparable<OFAuxId>, PrimitiveSinkable {
+    
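+    /** aux ids are encoded as a single unsigned byte on the wire; the factory
+     *  methods reject values outside 0..255 */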
+    private static final short VALIDATION_MASK = 0xFF;
+    
+    private static final short MAIN_VAL = 0x0000;
+    
+    public static final OFAuxId MAIN = new OFAuxId(MAIN_VAL);
+            
+    private final short id;
+
+    private OFAuxId(short id) {
+        this.id = id;
+    }
+
+    public static OFAuxId of(short id) {
+        switch(id) {
+            case MAIN_VAL:
+                return MAIN;
+            default:
+                if ((id & VALIDATION_MASK) != id)
+                    throw new IllegalArgumentException("Illegal Aux id value: " + id);
+                return new OFAuxId(id);
+        }
+    }
+
+    public static OFAuxId of(int id) {
+        if((id & VALIDATION_MASK) != id)
+            throw new IllegalArgumentException("Illegal Aux id value: "+id);
+        return of((short) id);
+    }
+
+    @Override
+    public String toString() {
+        return "0x" + Integer.toHexString(id);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + id;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        OFAuxId other = (OFAuxId) obj;
+        if (id != other.id) return false;
+        return true;
+    }
+
+    public short getValue() {
+        return id;
+    }
+
+    public void writeByte(ChannelBuffer c) {
+        c.writeByte(this.id);
+    }
+
+    public static OFAuxId readByte(ChannelBuffer c) throws OFParseError {
+        return OFAuxId.of(c.readUnsignedByte());
+    }
+
+
+    @Override
+    public int compareTo(OFAuxId other) {
+        return Shorts.compare(this.id, other.id);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putByte((byte) id);
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFBitMask128.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFBitMask128.java
new file mode 100644
index 0000000..93f5a2d
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFBitMask128.java
@@ -0,0 +1,103 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+
+public class OFBitMask128 implements OFValueType<OFBitMask128> {
+
+    static final int LENGTH = 16;
+
+    private final long raw1; // MSBs (ports 64-127)
+    private final long raw2; // LSBs (ports 0-63)
+
+    public static final OFBitMask128 ALL = new OFBitMask128(-1, -1);
+    public static final OFBitMask128 NONE = new OFBitMask128(0, 0);
+
+    public static final OFBitMask128 NO_MASK = ALL;
+    public static final OFBitMask128 FULL_MASK = NONE;
+
+    private OFBitMask128(long raw1, long raw2) {
+        this.raw1 = raw1;
+        this.raw2 = raw2;
+    }
+
+    public static OFBitMask128 of(long raw1, long raw2) {
+        if (raw1 == -1 && raw2 == -1)
+            return ALL;
+        if (raw1 == 0 && raw2 == 0)
+            return NONE;
+        return new OFBitMask128(raw1, raw2);
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public OFBitMask128 applyMask(OFBitMask128 mask) {
+        return of(this.raw1 & mask.raw1, this.raw2 & mask.raw2);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof OFBitMask128))
+            return false;
+        OFBitMask128 other = (OFBitMask128)obj;
+        return (other.raw1 == this.raw1 && other.raw2 == this.raw2);
+    }
+
+    @Override
+    public int hashCode() {
+        return (int)(31 * raw1 + raw2);
+    }
+
+    protected static boolean isBitOn(long raw1, long raw2, int bit) {
+        if (bit < 0 || bit >= 128)
+            throw new IndexOutOfBoundsException();
+        long word;
+        if (bit < 64) {
+            word = raw2; // ports 0-63
+        } else {
+            word = raw1; // ports 64-127
+            bit -= 64;
+        }
+        return (word & ((long)1 << bit)) != 0;
+    }
+
+    public void write16Bytes(ChannelBuffer cb) {
+        cb.writeLong(raw1);
+        cb.writeLong(raw2);
+    }
+
+    public static OFBitMask128 read16Bytes(ChannelBuffer cb) {
+        long raw1 = cb.readLong();
+        long raw2 = cb.readLong();
+        return of(raw1, raw2);
+    }
+
+    public boolean isOn(int bit) {
+        return isBitOn(raw1, raw2, bit);
+    }
+
+    @Override
+    public String toString() {
+        return (String.format("%64s", Long.toBinaryString(raw2)) + String.format("%64s", Long.toBinaryString(raw1))).replaceAll(" ", "0");
+    }
+
+    @Override
+    public int compareTo(OFBitMask128 o) {
+        long c = this.raw1 - o.raw1;
+        if (c != 0)
+            return Long.signum(c);
+        return Long.signum(this.raw2 - o.raw2);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putLong(raw1);
+        sink.putLong(raw2);
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFBooleanValue.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFBooleanValue.java
new file mode 100644
index 0000000..e276092
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFBooleanValue.java
@@ -0,0 +1,110 @@
+/**
+ *    Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
+ *    University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFMessageReader;
+import org.projectfloodlight.openflow.protocol.Writeable;
+
+import com.google.common.hash.PrimitiveSink;
+
+public class OFBooleanValue implements Writeable, OFValueType<OFBooleanValue> {
+    public final static OFBooleanValue TRUE = new OFBooleanValue(true);
+    public final static OFBooleanValue FALSE = new OFBooleanValue(false);
+
+    public final static OFBooleanValue NO_MASK = TRUE;
+    public final static OFBooleanValue FULL_MASK = FALSE;
+
+    private final boolean value;
+
+    private OFBooleanValue(boolean value) {
+      this.value = value;
+    }
+
+    public static OFBooleanValue of(boolean value) {
+      return value ? TRUE : FALSE;
+    }
+
+    public boolean getValue() {
+      return value;
+    }
+
+    public int getInt() {
+      return value ? 1 : 0;
+    }
+
+    @Override
+    public String toString() {
+        return "" + value;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + getInt();
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        OFBooleanValue other = (OFBooleanValue) obj;
+        if (value != other.value)
+            return false;
+        return true;
+    }
+
+    @Override
+    public void writeTo(ChannelBuffer bb) {
+        bb.writeByte(getInt());
+    }
+
+    private static class Reader implements OFMessageReader<OFBooleanValue> {
+        @Override
+        public OFBooleanValue readFrom(ChannelBuffer bb) throws OFParseError {
+            return of(bb.readByte() != 0);
+        }
+    }
+
+    @Override
+    public int getLength() {
+        return 1;
+    }
+
+    @Override
+    public OFBooleanValue applyMask(OFBooleanValue mask) {
+        return of(value && mask.value);
+    }
+
+    @Override
+    public int compareTo(OFBooleanValue o) {
+        return getInt() - o.getInt();
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putByte((byte)getInt());
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFBufferId.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFBufferId.java
new file mode 100644
index 0000000..7f76b4d
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFBufferId.java
@@ -0,0 +1,69 @@
+package org.projectfloodlight.openflow.types;
+
+import org.projectfloodlight.openflow.annotations.Immutable;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+/**
+ * Abstraction of a buffer id in OpenFlow. Immutable.
+ *
+ * @author Rob Vaterlaus <rob.vaterlaus@bigswitch.com>
+ */
+@Immutable
+public class OFBufferId implements Comparable<OFBufferId>, PrimitiveSinkable {
+    public static final OFBufferId NO_BUFFER = new OFBufferId(0xFFFFFFFF);
+
+    private final int rawValue;
+
+    private OFBufferId(int rawValue) {
+        this.rawValue = rawValue;
+    }
+
+    public static OFBufferId of(final int rawValue) {
+        if (rawValue == NO_BUFFER.getInt())
+            return NO_BUFFER;
+        return new OFBufferId(rawValue);
+    }
+
+    public int getInt() {
+        return rawValue;
+    }
+
+    @Override
+    public String toString() {
+        return Long.toString(U32.f(rawValue));
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + rawValue;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        OFBufferId other = (OFBufferId) obj;
+        if (rawValue != other.rawValue)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int compareTo(OFBufferId o) {
+        return UnsignedInts.compare(rawValue, o.rawValue);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(rawValue);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFChecksum128.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFChecksum128.java
new file mode 100644
index 0000000..7578dc6
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFChecksum128.java
@@ -0,0 +1,80 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+
+public class OFChecksum128 implements OFValueType<OFChecksum128> {
+
+    static final int LENGTH = 16;
+
+    private final long raw1; // MSBs
+    private final long raw2; // LSBs
+
+    public static final OFChecksum128 ZERO = new OFChecksum128(0, 0);
+
+    private OFChecksum128(long raw1, long raw2) {
+        this.raw1 = raw1;
+        this.raw2 = raw2;
+    }
+
+    public static OFChecksum128 of(long raw1, long raw2) {
+        if (raw1 == 0 && raw2 == 0)
+            return ZERO;
+        return new OFChecksum128(raw1, raw2);
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public OFChecksum128 applyMask(OFChecksum128 mask) {
+        return of(this.raw1 & mask.raw1, this.raw2 & mask.raw2);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof OFChecksum128))
+            return false;
+        OFChecksum128 other = (OFChecksum128)obj;
+        return (other.raw1 == this.raw1 && other.raw2 == this.raw2);
+    }
+
+    @Override
+    public int hashCode() {
+        return (int)(31 * raw1 + raw2);
+    }
+
+    public void write16Bytes(ChannelBuffer cb) {
+        cb.writeLong(raw1);
+        cb.writeLong(raw2);
+    }
+
+    public static OFChecksum128 read16Bytes(ChannelBuffer cb) {
+        long raw1 = cb.readLong();
+        long raw2 = cb.readLong();
+        return of(raw1, raw2);
+    }
+
+    @Override
+    public String toString() {
+        return String.format("0x%016x%016x", raw1, raw2);
+    }
+
+    @Override
+    public int compareTo(OFChecksum128 o) {
+        long c = this.raw1 - o.raw1;
+        if (c != 0)
+            return Long.signum(c);
+        return Long.signum(this.raw2 - o.raw2);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putLong(raw1);
+        sink.putLong(raw2);
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFGroup.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFGroup.java
new file mode 100644
index 0000000..b05d5fa
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFGroup.java
@@ -0,0 +1,156 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.annotations.Immutable;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+/**
+ * Abstraction of a logical / OpenFlow group (ofp_group) in OpenFlow.
+ * Immutable.
+ *
+ * @author Andreas Wundsam <andreas.wundsam@bigswitch.com>
+ */
+@Immutable
+public class OFGroup implements OFValueType<OFGroup> {
+    static final int LENGTH = 4;
+
+    // private int constants (OF1.1+) to avoid duplication in the code
+    // should not have to use these outside this class
+    private static final int ZERO_VAL = 0x00;
+    private static final int MAX_VAL = 0xffffff00;
+    private static final int ALL_VAL = 0xfffffffc;
+    private static final int ANY_VAL = 0xffffffff;
+
+
+    // ////////////// public constants - use to access well known OpenFlow group constants
+
+    /** Maximum number of physical and logical switch groups. */
+    public final static OFGroup MAX = new NamedGroup(MAX_VAL, "max");
+
+    /** All groups */
+    public final static OFGroup ALL = new NamedGroup(ALL_VAL, "all");
+
+    /**
+     * Wildcard group used only for flow mod (delete) and flow stats requests. */
+    public final static OFGroup ANY = new NamedGroup(ANY_VAL, "any");
+
+    /** group 0 in case we need it */
+    public static final OFGroup ZERO = new OFGroup(ZERO_VAL);
+
+    public static final OFGroup NO_MASK = ANY;
+    public static final OFGroup FULL_MASK = ZERO;
+
+    /** raw openflow group number as a signed 32 bit integer */
+    private final int groupNumber;
+
+    /** private constructor. use of*-Factory methods instead */
+    private OFGroup(final int groupNumber) {
+        this.groupNumber = groupNumber;
+    }
+
+    /**
+     * get an OFGroup object corresponding to a raw 32-bit integer group number.
+     * NOTE: The group object may either be newly allocated or cached. Do not
+     * rely on either behavior.
+     *
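+     * <p>For example (illustrative only):
+     * <pre>
+     * OFGroup g = OFGroup.of(7);    // regular group number 7
+     * OFGroup.ALL.getGroupNumber(); // ALL_VAL (0xfffffffc)
+     * </pre>
+     *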
+     * @param groupNumber the raw 32-bit group number
+     * @return a corresponding OFGroup
+     */
+    public static OFGroup of(final int groupNumber) {
+        switch(groupNumber) {
+            case ZERO_VAL:
+                return ZERO;
+            case MAX_VAL:
+                return MAX;
+            case ALL_VAL:
+                return ALL;
+            case ANY_VAL:
+                return ANY;
+            default:
+                if(UnsignedInts.compare(groupNumber, MAX_VAL) > 0) {
+                    // greater than max_val, but not one of the reserved values
+                    throw new IllegalArgumentException("Unknown special group number: "
+                            + groupNumber);
+                }
+                return new OFGroup(groupNumber);
+        }
+    }
+
+    /** return the group number as an int32 */
+    public int getGroupNumber() {
+        return groupNumber;
+    }
+
+    @Override
+    public String toString() {
+        return UnsignedInts.toString(groupNumber);
+    }
+
+    /** Extension of OFGroup for named groups */
+    static class NamedGroup extends OFGroup {
+        private final String name;
+
+        NamedGroup(final int groupNo, final String name) {
+            super(groupNo);
+            this.name = name;
+        }
+
+        public String getName() {
+            return name;
+        }
+
+        @Override
+        public String toString() {
+            return name;
+        }
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof OFGroup))
+            return false;
+        OFGroup other = (OFGroup)obj;
+        if (other.groupNumber != this.groupNumber)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 53;
+        int result = 1;
+        result = prime * result + groupNumber;
+        return result;
+    }
+
+    public void write4Bytes(ChannelBuffer c) {
+        c.writeInt(this.groupNumber);
+    }
+
+    public static OFGroup read4Bytes(ChannelBuffer c) throws OFParseError {
+        return OFGroup.of(c.readInt());
+    }
+
+    @Override
+    public OFGroup applyMask(OFGroup mask) {
+        return OFGroup.of(this.groupNumber & mask.groupNumber);
+    }
+
+    @Override
+    public int compareTo(OFGroup o) {
+        return UnsignedInts.compare(this.groupNumber, o.groupNumber);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(groupNumber);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFHelloElement.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFHelloElement.java
new file mode 100644
index 0000000..10d06a0
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFHelloElement.java
@@ -0,0 +1,5 @@
+package org.projectfloodlight.openflow.types;
+
+public interface OFHelloElement {
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFMetadata.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFMetadata.java
new file mode 100644
index 0000000..fcabdcd
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFMetadata.java
@@ -0,0 +1,81 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+
+public class OFMetadata implements OFValueType<OFMetadata> {
+
+    static final int LENGTH = 8;
+
+    private final U64 u64;
+
+    public static final OFMetadata NONE = OFMetadata.of(U64.ZERO);
+
+    public static final OFMetadata NO_MASK = OFMetadata.of(U64.ofRaw(0xFFFFFFFFFFFFFFFFL));
+    public static final OFMetadata FULL_MASK = OFMetadata.of(U64.ofRaw(0x0));
+
+    public OFMetadata(U64 ofRaw) {
+        u64 = ofRaw;
+    }
+
+    public static OFMetadata of(U64 u64) {
+        return new OFMetadata(u64);
+    }
+
+    public static OFMetadata ofRaw(long raw) {
+        return new OFMetadata(U64.ofRaw(raw));
+    }
+
+    public U64 getValue() {
+        return u64;
+    }
+
+    public static OFMetadata read8Bytes(ChannelBuffer cb) {
+        return OFMetadata.ofRaw(cb.readLong());
+    }
+
+    public void write8Bytes(ChannelBuffer cb) {
+        u64.writeTo(cb);
+    }
+
+    @Override
+    public int getLength() {
+        return u64.getLength();
+    }
+
+    @Override
+    public OFMetadata applyMask(OFMetadata mask) {
+        return OFMetadata.of(this.u64.applyMask(mask.u64));
+    }
+
+    @Override
+    public boolean equals(Object arg0) {
+        if (!(arg0 instanceof OFMetadata))
+            return false;
+        OFMetadata other = (OFMetadata)arg0;
+
+        return this.u64.equals(other.u64);
+    }
+
+    @Override
+    public int hashCode() {
+        int prime = 53;
+        return this.u64.hashCode() * prime;
+    }
+
+    @Override
+    public String toString() {
+        return "Metadata: " + u64.toString();
+    }
+
+    @Override
+    public int compareTo(OFMetadata o) {
+        return u64.compareTo(o.u64);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        u64.putTo(sink);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFPort.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFPort.java
new file mode 100644
index 0000000..155a9db
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFPort.java
@@ -0,0 +1,563 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.annotations.Immutable;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+/**
+ * Abstraction of a logical / OpenFlow switch port (ofp_port_no) in OpenFlow.
+ * Immutable. Note: Switch port numbers were changed in OpenFlow 1.1 from uint16
+ * to uint32. This class uses a 32-bit representation internally. Port numbers
+ * are converted from/to uint16 when constructed via ofShort / when
+ * getShortPortNumber is called. If this port is not representable in OpenFlow
+ * 1.0, an IllegalArgumentException is thrown.
+ *
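+ * <p>For example (illustrative only):
+ * <pre>
+ * OFPort p = OFPort.of(1);                 // regular port 1
+ * short s = p.getShortPortNumber();        // 1
+ * OFPort.CONTROLLER.getShortPortNumber();  // 0xfffd, the OF1.0 encoding
+ * OFPort.of(0x10000).getShortPortNumber(); // throws IllegalArgumentException
+ * </pre>
+ *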
+ * @author Andreas Wundsam <andreas.wundsam@bigswitch.com>
+ */
+@Immutable
+public class OFPort implements OFValueType<OFPort> {
+    static final int LENGTH = 4;
+
+    // private int constants (OF1.1+) to avoid duplication in the code
+    // should not have to use these outside this class
+    private static final int OFPP_ANY_INT = 0xFFffFFff;
+    private static final int OFPP_LOCAL_INT = 0xFFffFFfe;
+    private static final int OFPP_CONTROLLER_INT = 0xFFffFFfd;
+    private static final int OFPP_ALL_INT = 0xFFffFFfc;
+    private static final int OFPP_FLOOD_INT = 0xFFffFFfb;
+    private static final int OFPP_NORMAL_INT = 0xFFffFFfa;
+    private static final int OFPP_TABLE_INT = 0xFFffFFf9;
+    private static final int OFPP_MAX_INT = 0xFFffFF00;
+    private static final int OFPP_IN_PORT_INT = 0xFFffFFf8;
+
+    // private short constants (OF1.0) to avoid duplication in the code
+    // should not have to use these outside this class
+    private static final short OFPP_ANY_SHORT = (short) 0xFFff;
+    private static final short OFPP_LOCAL_SHORT = (short) 0xFFfe;
+    private static final short OFPP_CONTROLLER_SHORT = (short) 0xFFfd;
+    private static final short OFPP_ALL_SHORT = (short) 0xFFfc;
+    private static final short OFPP_FLOOD_SHORT = (short) 0xFFfb;
+    private static final short OFPP_NORMAL_SHORT = (short) 0xFFfa;
+    private static final short OFPP_TABLE_SHORT = (short) 0xFFf9;
+    private static final short OFPP_IN_PORT_SHORT = (short) 0xFFf8;
+    private static final short OFPP_MAX_SHORT = (short) 0xFF00;
+    private static final int OFPP_MAX_SHORT_UNSIGNED = 0xFF00;
+
+    // ////////////// public constants - use to access well known OpenFlow ports
+
+    /** Maximum number of physical and logical switch ports. */
+    public final static OFPort MAX = new NamedPort(OFPP_MAX_INT, "max");
+
+    /**
+     * Send the packet out the input port. This reserved port must be explicitly
+     * used in order to send back out of the input port.
+     */
+    public final static OFPort IN_PORT = new NamedPort(OFPP_IN_PORT_INT, "in_port");
+
+    /**
+     * Submit the packet to the first flow table. NB: This destination port can
+     * only be used in packet-out messages.
+     */
+    public final static OFPort TABLE = new NamedPort(OFPP_TABLE_INT, "table");
+
+    /** Process with normal L2/L3 switching. */
+    public final static OFPort NORMAL = new NamedPort(OFPP_NORMAL_INT, "normal");
+
+    /**
+     * All physical ports in VLAN, except input port and those blocked or link
+     * down
+     */
+    public final static OFPort FLOOD = new NamedPort(OFPP_FLOOD_INT, "flood");
+
+    /** All physical ports except input port */
+    public final static OFPort ALL = new NamedPort(OFPP_ALL_INT, "all");
+
+    /** Send to controller */
+    public final static OFPort CONTROLLER =
+            new NamedPort(OFPP_CONTROLLER_INT, "controller");
+
+    /** local openflow "port" */
+    public final static OFPort LOCAL = new NamedPort(OFPP_LOCAL_INT, "local");
+
+    /**
+     * Wildcard port used only for flow mod (delete) and flow stats requests.
+     * Selects all flows regardless of output port (including flows with no
+     * output port). NOTE: OpenFlow 1.0 calls this 'NONE'
+     */
+    public final static OFPort ANY = new NamedPort(OFPP_ANY_INT, "any");
+    /** the wildcarded default for OpenFlow 1.0 (value: 0). Elsewhere in OpenFlow
+     *  we need "ANY" as the default
+     */
+    public static final OFPort ZERO = OFPort.of(0);
+
+    public static final OFPort NO_MASK = OFPort.of(0xFFFFFFFF);
+    public static final OFPort FULL_MASK = ZERO;
+
+    /** cache of frequently used ports */
+    private static class PrecachedPort {
+        private final static OFPort p0 = new OFPort(0);
+        private final static OFPort p1 = new OFPort(1);
+        private final static OFPort p2 = new OFPort(2);
+        private final static OFPort p3 = new OFPort(3);
+        private final static OFPort p4 = new OFPort(4);
+        private final static OFPort p5 = new OFPort(5);
+        private final static OFPort p6 = new OFPort(6);
+        private final static OFPort p7 = new OFPort(7);
+        private final static OFPort p8 = new OFPort(8);
+        private final static OFPort p9 = new OFPort(9);
+        private final static OFPort p10 = new OFPort(10);
+        private final static OFPort p11 = new OFPort(11);
+        private final static OFPort p12 = new OFPort(12);
+        private final static OFPort p13 = new OFPort(13);
+        private final static OFPort p14 = new OFPort(14);
+        private final static OFPort p15 = new OFPort(15);
+        private final static OFPort p16 = new OFPort(16);
+        private final static OFPort p17 = new OFPort(17);
+        private final static OFPort p18 = new OFPort(18);
+        private final static OFPort p19 = new OFPort(19);
+        private final static OFPort p20 = new OFPort(20);
+        private final static OFPort p21 = new OFPort(21);
+        private final static OFPort p22 = new OFPort(22);
+        private final static OFPort p23 = new OFPort(23);
+        private final static OFPort p24 = new OFPort(24);
+        private final static OFPort p25 = new OFPort(25);
+        private final static OFPort p26 = new OFPort(26);
+        private final static OFPort p27 = new OFPort(27);
+        private final static OFPort p28 = new OFPort(28);
+        private final static OFPort p29 = new OFPort(29);
+        private final static OFPort p30 = new OFPort(30);
+        private final static OFPort p31 = new OFPort(31);
+        private final static OFPort p32 = new OFPort(32);
+        private final static OFPort p33 = new OFPort(33);
+        private final static OFPort p34 = new OFPort(34);
+        private final static OFPort p35 = new OFPort(35);
+        private final static OFPort p36 = new OFPort(36);
+        private final static OFPort p37 = new OFPort(37);
+        private final static OFPort p38 = new OFPort(38);
+        private final static OFPort p39 = new OFPort(39);
+        private final static OFPort p40 = new OFPort(40);
+        private final static OFPort p41 = new OFPort(41);
+        private final static OFPort p42 = new OFPort(42);
+        private final static OFPort p43 = new OFPort(43);
+        private final static OFPort p44 = new OFPort(44);
+        private final static OFPort p45 = new OFPort(45);
+        private final static OFPort p46 = new OFPort(46);
+        private final static OFPort p47 = new OFPort(47);
+        private final static OFPort p48 = new OFPort(48);
+    }
+
+    /** raw openflow port number as a signed 32 bit integer */
+    private final int portNumber;
+
+    /** private constructor. use of*-Factory methods instead */
+    private OFPort(final int portNumber) {
+        this.portNumber = portNumber;
+    }
+
+    /**
+     * get an OFPort object corresponding to a raw 32-bit integer port number.
+     * NOTE: The port object may either be newly allocated or cached. Do not
+     * rely on either behavior.
+     *
+     * @param portNumber
+     * @return a corresponding OFPort
+     */
+    public static OFPort ofInt(final int portNumber) {
+        switch (portNumber) {
+            case 0:
+                return PrecachedPort.p0;
+            case 1:
+                return PrecachedPort.p1;
+            case 2:
+                return PrecachedPort.p2;
+            case 3:
+                return PrecachedPort.p3;
+            case 4:
+                return PrecachedPort.p4;
+            case 5:
+                return PrecachedPort.p5;
+            case 6:
+                return PrecachedPort.p6;
+            case 7:
+                return PrecachedPort.p7;
+            case 8:
+                return PrecachedPort.p8;
+            case 9:
+                return PrecachedPort.p9;
+            case 10:
+                return PrecachedPort.p10;
+            case 11:
+                return PrecachedPort.p11;
+            case 12:
+                return PrecachedPort.p12;
+            case 13:
+                return PrecachedPort.p13;
+            case 14:
+                return PrecachedPort.p14;
+            case 15:
+                return PrecachedPort.p15;
+            case 16:
+                return PrecachedPort.p16;
+            case 17:
+                return PrecachedPort.p17;
+            case 18:
+                return PrecachedPort.p18;
+            case 19:
+                return PrecachedPort.p19;
+            case 20:
+                return PrecachedPort.p20;
+            case 21:
+                return PrecachedPort.p21;
+            case 22:
+                return PrecachedPort.p22;
+            case 23:
+                return PrecachedPort.p23;
+            case 24:
+                return PrecachedPort.p24;
+            case 25:
+                return PrecachedPort.p25;
+            case 26:
+                return PrecachedPort.p26;
+            case 27:
+                return PrecachedPort.p27;
+            case 28:
+                return PrecachedPort.p28;
+            case 29:
+                return PrecachedPort.p29;
+            case 30:
+                return PrecachedPort.p30;
+            case 31:
+                return PrecachedPort.p31;
+            case 32:
+                return PrecachedPort.p32;
+            case 33:
+                return PrecachedPort.p33;
+            case 34:
+                return PrecachedPort.p34;
+            case 35:
+                return PrecachedPort.p35;
+            case 36:
+                return PrecachedPort.p36;
+            case 37:
+                return PrecachedPort.p37;
+            case 38:
+                return PrecachedPort.p38;
+            case 39:
+                return PrecachedPort.p39;
+            case 40:
+                return PrecachedPort.p40;
+            case 41:
+                return PrecachedPort.p41;
+            case 42:
+                return PrecachedPort.p42;
+            case 43:
+                return PrecachedPort.p43;
+            case 44:
+                return PrecachedPort.p44;
+            case 45:
+                return PrecachedPort.p45;
+            case 46:
+                return PrecachedPort.p46;
+            case 47:
+                return PrecachedPort.p47;
+            case 48:
+                return PrecachedPort.p48;
+            case OFPP_MAX_INT:
+                return MAX;
+            case OFPP_IN_PORT_INT:
+                return IN_PORT;
+            case OFPP_TABLE_INT:
+                return TABLE;
+            case OFPP_NORMAL_INT:
+                return NORMAL;
+            case OFPP_FLOOD_INT:
+                return FLOOD;
+            case OFPP_ALL_INT:
+                return ALL;
+            case OFPP_CONTROLLER_INT:
+                return CONTROLLER;
+            case OFPP_LOCAL_INT:
+                return LOCAL;
+            case OFPP_ANY_INT:
+                return ANY;
+            default:
+                // note: this effectively checks portNumber > OFPP_MAX_INT,
+                // accounting for the signedness of both portNumber and
+                // OFPP_MAX_INT (which is -256). Any unsigned integer value
+                // > OFPP_MAX_INT reads as a value in ]-256:0[ when
+                // interpreted as signed.
+                if (portNumber < 0 && portNumber > OFPP_MAX_INT)
+                    throw new IllegalArgumentException("Unknown special port number: "
+                            + portNumber);
+                return new OFPort(portNumber);
+        }
+    }
+
+    /** convenience function: delegates to ofInt */
+    public static OFPort of(final int portNumber) {
+        return ofInt(portNumber);
+    }
+
+    /**
+     * get an OFPort object corresponding to a raw signed 16-bit integer port
+     * number (OF1.0). Note that the port returned will have the corresponding
+     * 32-bit integer value allocated as its port number. NOTE: The port object
+     * may either be newly allocated or cached. Do not rely on either behavior.
+     *
+     * @param portNumber
+     * @return a corresponding OFPort
+     */
+    public static OFPort ofShort(final short portNumber) {
+        switch (portNumber) {
+            case 0:
+                return PrecachedPort.p0;
+            case 1:
+                return PrecachedPort.p1;
+            case 2:
+                return PrecachedPort.p2;
+            case 3:
+                return PrecachedPort.p3;
+            case 4:
+                return PrecachedPort.p4;
+            case 5:
+                return PrecachedPort.p5;
+            case 6:
+                return PrecachedPort.p6;
+            case 7:
+                return PrecachedPort.p7;
+            case 8:
+                return PrecachedPort.p8;
+            case 9:
+                return PrecachedPort.p9;
+            case 10:
+                return PrecachedPort.p10;
+            case 11:
+                return PrecachedPort.p11;
+            case 12:
+                return PrecachedPort.p12;
+            case 13:
+                return PrecachedPort.p13;
+            case 14:
+                return PrecachedPort.p14;
+            case 15:
+                return PrecachedPort.p15;
+            case 16:
+                return PrecachedPort.p16;
+            case 17:
+                return PrecachedPort.p17;
+            case 18:
+                return PrecachedPort.p18;
+            case 19:
+                return PrecachedPort.p19;
+            case 20:
+                return PrecachedPort.p20;
+            case 21:
+                return PrecachedPort.p21;
+            case 22:
+                return PrecachedPort.p22;
+            case 23:
+                return PrecachedPort.p23;
+            case 24:
+                return PrecachedPort.p24;
+            case 25:
+                return PrecachedPort.p25;
+            case 26:
+                return PrecachedPort.p26;
+            case 27:
+                return PrecachedPort.p27;
+            case 28:
+                return PrecachedPort.p28;
+            case 29:
+                return PrecachedPort.p29;
+            case 30:
+                return PrecachedPort.p30;
+            case 31:
+                return PrecachedPort.p31;
+            case 32:
+                return PrecachedPort.p32;
+            case 33:
+                return PrecachedPort.p33;
+            case 34:
+                return PrecachedPort.p34;
+            case 35:
+                return PrecachedPort.p35;
+            case 36:
+                return PrecachedPort.p36;
+            case 37:
+                return PrecachedPort.p37;
+            case 38:
+                return PrecachedPort.p38;
+            case 39:
+                return PrecachedPort.p39;
+            case 40:
+                return PrecachedPort.p40;
+            case 41:
+                return PrecachedPort.p41;
+            case 42:
+                return PrecachedPort.p42;
+            case 43:
+                return PrecachedPort.p43;
+            case 44:
+                return PrecachedPort.p44;
+            case 45:
+                return PrecachedPort.p45;
+            case 46:
+                return PrecachedPort.p46;
+            case 47:
+                return PrecachedPort.p47;
+            case 48:
+                return PrecachedPort.p48;
+            case OFPP_MAX_SHORT:
+                return MAX;
+            case OFPP_IN_PORT_SHORT:
+                return IN_PORT;
+            case OFPP_TABLE_SHORT:
+                return TABLE;
+            case OFPP_NORMAL_SHORT:
+                return NORMAL;
+            case OFPP_FLOOD_SHORT:
+                return FLOOD;
+            case OFPP_ALL_SHORT:
+                return ALL;
+            case OFPP_CONTROLLER_SHORT:
+                return CONTROLLER;
+            case OFPP_LOCAL_SHORT:
+                return LOCAL;
+            case OFPP_ANY_SHORT:
+                return ANY;
+            default:
+                // note: this effectively checks portNumber > OFPP_MAX_SHORT,
+                // accounting for the signedness of both portNumber and
+                // OFPP_MAX_SHORT (which is -256). Any unsigned short value
+                // > OFPP_MAX_SHORT reads as a value in ]-256:0[ when
+                // interpreted as signed.
+                if (portNumber < 0 && portNumber > OFPP_MAX_SHORT)
+                    throw new IllegalArgumentException("Unknown special port number: "
+                            + portNumber);
+                return new OFPort(portNumber);
+        }
+    }
+
+    /** return the port number as an int32 */
+    public int getPortNumber() {
+        return portNumber;
+    }
+
+    /**
+     * return the port number as an int16. Special ports as defined by the
+     * OpenFlow spec will be converted to their OpenFlow 1.0 equivalent. Port
+     * numbers >= 0xFF00 will cause an IllegalArgumentException to be thrown.
+     *
+     * @throws IllegalArgumentException
+     *             if a regular port number exceeds the maximum value in OF1.0
+     **/
+    public short getShortPortNumber() {
+
+        switch (portNumber) {
+            case OFPP_MAX_INT:
+                return OFPP_MAX_SHORT;
+            case OFPP_IN_PORT_INT:
+                return OFPP_IN_PORT_SHORT;
+            case OFPP_TABLE_INT:
+                return OFPP_TABLE_SHORT;
+            case OFPP_NORMAL_INT:
+                return OFPP_NORMAL_SHORT;
+            case OFPP_FLOOD_INT:
+                return OFPP_FLOOD_SHORT;
+            case OFPP_ALL_INT:
+                return OFPP_ALL_SHORT;
+            case OFPP_CONTROLLER_INT:
+                return OFPP_CONTROLLER_SHORT;
+            case OFPP_LOCAL_INT:
+                return OFPP_LOCAL_SHORT;
+            case OFPP_ANY_INT:
+                return OFPP_ANY_SHORT;
+
+            default:
+                if (portNumber >= OFPP_MAX_SHORT_UNSIGNED || portNumber < 0)
+                    throw new IllegalArgumentException("32bit Port number "
+                            + U32.f(portNumber)
+                            + " cannot be represented as uint16 (OF1.0)");
+
+                return (short) portNumber;
+        }
+    }
+
+    @Override
+    public String toString() {
+        return Long.toString(U32.f(portNumber));
+    }
+
+    /** Extension of OFPort for named ports */
+    static class NamedPort extends OFPort {
+        private final String name;
+
+        NamedPort(final int portNo, final String name) {
+            super(portNo);
+            this.name = name;
+        }
+
+        public String getName() {
+            return name;
+        }
+
+        @Override
+        public String toString() {
+            return name;
+        }
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof OFPort))
+            return false;
+        OFPort other = (OFPort)obj;
+        if (other.portNumber != this.portNumber)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 53;
+        int result = 1;
+        result = prime * result + portNumber;
+        return result;
+    }
+
+    public void write2Bytes(ChannelBuffer c) {
+        c.writeShort(this.portNumber);
+    }
+
+    public static OFPort read2Bytes(ChannelBuffer c) throws OFParseError {
+        return OFPort.ofShort(c.readShort());
+    }
+
+    public void write4Bytes(ChannelBuffer c) {
+        c.writeInt(this.portNumber);
+    }
+
+    public static OFPort read4Bytes(ChannelBuffer c) throws OFParseError {
+        return OFPort.of((int)(c.readUnsignedInt() & 0xFFFFFFFF));
+    }
+
+    @Override
+    public OFPort applyMask(OFPort mask) {
+        return OFPort.of(this.portNumber & mask.portNumber);
+    }
+
+    @Override
+    public int compareTo(OFPort o) {
+        return UnsignedInts.compare(this.portNumber, o.portNumber);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(portNumber);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFPortBitMap.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFPortBitMap.java
new file mode 100644
index 0000000..63b97f3
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFPortBitMap.java
@@ -0,0 +1,165 @@
+package org.projectfloodlight.openflow.types;
+
+import java.util.ArrayList;
+
+import javax.annotation.concurrent.Immutable;
+
+
+/** User-facing object representing a bitmap of ports that can be matched on.
+ *  This is implemented by the custom BSN OXM type of_oxm_bsn_in_ports_128.
+ *
+ *  You can call set() on the builder for all the Ports you want to match on
+ *  and unset to exclude the port.
+ *
+ *  <b>Implementation note:</b> to comply with the matching semantics of OXM (which is a logical "AND" not "OR")
+ *  the underlying match uses a data format which is very unintuitive. The value is always
+ *  0, and the mask has the bits set for the ports that should <b>NOT</b> be included in the
+ *  range.
+ *
+ *  For the curious: We transformed the bitmap (a logical OR) problem into a logical
+ *  AND NOT problem.
+ *
+ *  We logically mean:   Inport is 1 OR 3
+ *  We technically say:  Inport IS NOT 2 AND IS NOT 4 AND IS NOT 5 AND IS NOT ....
+ *  The first form cannot be represented in OXM; the second can.
+ *
+ *  That said, all that craziness is hidden from the user of this object.
+ *
+ *  <h2>Usage</h2>
+ *  OFPortBitMap is meant to be used with MatchField <tt>BSN_IN_PORTS_128</tt> in place
+ *  of the raw type Masked&lt;OFBitMask128&gt;.
+ *
+ *  <h3>Example:</h3>
+ *  <pre>
+ *  OFPortBitMap portBitMap;
+ *  Match.Builder matchBuilder;
+ *  // initialize
+ *  matchBuilder.setMasked(MatchField.BSN_IN_PORTS_128, portBitMap);
+ *  </pre>
+ *
+ * @author Andreas Wundsam <andreas.wundsam@bigswitch.com>
+ */
+@Immutable
+public class OFPortBitMap extends Masked<OFBitMask128> {
+
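+    /** Note: the value is always OFBitMask128.NONE (all zeros); only the mask
+     *  carries information (see the implementation note above). */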
+    private OFPortBitMap(OFBitMask128 mask) {
+        super(OFBitMask128.NONE, mask);
+    }
+
+    /** @return whether or not the given port is logically included in the
+     *  match, i.e., whether a packet from in-port <em>port</em> would be matched by
+     *  this OXM.
+     */
+    public boolean isOn(OFPort port) {
+        // see the implementation note above about the logical inversion of the mask
+        return !(this.mask.isOn(port.getPortNumber()));
+    }
+
+    public static OFPortBitMap ofPorts(OFPort... ports) {
+        Builder builder = new Builder();
+        for (OFPort port: ports) {
+            builder.set(port);
+        }
+        return builder.build();
+    }
+
+    /** @return an OFPortBitmap based on the 'mask' part of an OFBitMask128, as, e.g., returned
+     *  by the switch.
+     **/
+    public static OFPortBitMap of(OFBitMask128 mask) {
+        return new OFPortBitMap(mask);
+    }
+
+    /** @return an Iterable over all ports that are logically included in the
+     *  match, i.e., all in-ports from which a packet would be matched by
+     *  this OXM.
+     */
+    public Iterable<OFPort> getOnPorts() {
+        ArrayList<OFPort> ports = new ArrayList<>();
+        for(int i=0; i < 127; i++) {
+            if(!(this.mask.isOn(i))) {
+                ports.add(OFPort.of(i));
+            }
+        }
+        return ports;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof OFPortBitMap))
+            return false;
+        OFPortBitMap other = (OFPortBitMap)obj;
+        return (other.value.equals(this.value) && other.mask.equals(this.mask));
+    }
+
+    @Override
+    public int hashCode() {
+        return 619 * mask.hashCode() + 257 * value.hashCode();
+    }
+
+    public static class Builder {
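+        // the mask starts with all bits set, i.e., no port is included;
+        // set(port) clears the corresponding bit to include that port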
+        private long raw1 = -1, raw2 = -1;
+
+        public Builder() {
+
+        }
+
+        /** @return whether or not the given port is logically included in the
+         *  match, i.e., whether a packet from in-port <em>port</em> would be matched by
+         *  this OXM.
+         */
+        public boolean isOn(OFPort port) {
+            // see the implementation note above about the logical inversion of the mask
+            return !(OFBitMask128.isBitOn(raw1, raw2, port.getPortNumber()));
+        }
+
+        /** remove this port from the match, i.e., packets from this in-port
+         *  will NOT be matched.
+         */
+        public Builder unset(OFPort port) {
+            // see the implementation note above about the logical inversion of the mask
+            int bit = port.getPortNumber();
+            if (bit < 0 || bit > 127)
+                throw new IndexOutOfBoundsException("Port number is out of bounds");
+            else if (bit == 127)
+                // the highest order bit in the bitmask is reserved. The switch will
+                // set that bit for all ports >= 127. The reason is that we don't want
+                // the OFPortMap to match all ports out of its range (i.e., a packet
+                // coming in on port 181 would match *any* OFPortMap).
+                throw new IndexOutOfBoundsException("The highest order bit in the bitmask is reserved.");
+            else if (bit < 64) {
+                raw2 |= ((long)1 << bit);
+            } else {
+                raw1 |= ((long)1 << (bit - 64));
+            }
+            return this;
+        }
+
+        /** add this port to the match, i.e., packets from this in-port
+         *  WILL be matched.
+         */
+        public Builder set(OFPort port) {
+            // see the implementation note above about the logical inversion of the mask
+            int bit = port.getPortNumber();
+            if (bit < 0 || bit > 127)
+                throw new IndexOutOfBoundsException("Port number is out of bounds");
+            else if (bit == 127)
+                // the highest order bit in the bitmask is reserved. The switch will
+                // set that bit for all ports >= 127. The reason is that we don't want
+                // the OFPortMap to match all ports out of its range (i.e., a packet
+                // coming in on port 181 would match *any* OFPortMap).
+                throw new IndexOutOfBoundsException("The highest order bit in the bitmask is reserved.");
+            else if (bit < 64) {
+                raw2 &= ~((long)1 << bit);
+            } else {
+                raw1 &= ~((long)1 << (bit - 64));
+            }
+            return this;
+        }
+
+        public OFPortBitMap build() {
+            return new OFPortBitMap(OFBitMask128.of(raw1, raw2));
+        }
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFValueType.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFValueType.java
new file mode 100644
index 0000000..03e84dd
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFValueType.java
@@ -0,0 +1,11 @@
+package org.projectfloodlight.openflow.types;
+
+
+
+
+public interface OFValueType<T extends OFValueType<T>> extends Comparable<T>, PrimitiveSinkable {
+    public int getLength();
+
+    public T applyMask(T mask);
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFVlanVidMatch.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFVlanVidMatch.java
new file mode 100644
index 0000000..fddaa5d
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFVlanVidMatch.java
@@ -0,0 +1,190 @@
+package org.projectfloodlight.openflow.types;
+
+import javax.annotation.Nullable;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Shorts;
+
+/** Represents an OpenFlow Vlan VID for use in Matches, as specified by the OpenFlow 1.3 spec.
+ *
+ *  <b> Note: this is not just the 12-bit vlan tag. OpenFlow defines
+ *      the additional bit 0x1000 to represent the presence of a vlan
+ *      tag. This additional bit will be stripped when writing an OF1.0
+ *      value.
+ *  </b>
+ *
+ *
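+ *  <p>For example (illustrative only):
+ *  <pre>
+ *  OFVlanVidMatch m = OFVlanVidMatch.ofVlan(42); // raw value 0x102a, present bit set
+ *  m.getVlan();                                  // 42
+ *  OFVlanVidMatch.UNTAGGED.isPresentBitSet();    // false
+ *  </pre>
+ *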
+ * @author Andreas Wundsam <andreas.wundsam@bigswitch.com>
+ *
+ */
+public class OFVlanVidMatch implements OFValueType<OFVlanVidMatch> {
+    private static final Logger logger = LoggerFactory.getLogger(OFVlanVidMatch.class);
+
+    private static final short VALIDATION_MASK = 0x1FFF;
+    private static final short PRESENT_VAL = 0x1000;
+    private static final short VLAN_MASK = 0x0FFF;
+    private static final short NONE_VAL = 0x0000;
+    private static final short UNTAGGED_VAL_OF13 = (short) 0x0000;
+    private static final short UNTAGGED_VAL_OF10 = (short) 0xFFFF;
+    final static int LENGTH = 2;
+
+    /** presence of a VLAN tag is indicated by the presence of bit 0x1000 */
+    public static final OFVlanVidMatch PRESENT = new OFVlanVidMatch(PRESENT_VAL);
+
+    /** this value means 'not set' in OF1.0 (e.g., in a match). not used elsewhere */
+    public static final OFVlanVidMatch NONE = new OFVlanVidMatch(NONE_VAL);
+
+    /** for use with masking operations */
+    public static final OFVlanVidMatch NO_MASK = new OFVlanVidMatch((short)0xFFFF);
+    public static final OFVlanVidMatch FULL_MASK = NONE;
+
+    /** an untagged packet is specified as 0x0000 in OF1.3, but 0xFFFF in OF1.0. Special case that. */
+    public static final OFVlanVidMatch UNTAGGED = new OFVlanVidMatch(NONE_VAL) {
+        @Override
+        public void write2BytesOF10(ChannelBuffer c) {
+            c.writeShort(UNTAGGED_VAL_OF10);
+        }
+    };
+
+    private final short vid;
+
+    private OFVlanVidMatch(short vid) {
+        this.vid = vid;
+    }
+
+    public static OFVlanVidMatch ofRawVid(short vid) {
+        if(vid == UNTAGGED_VAL_OF13)
+            return UNTAGGED;
+        else if(vid == PRESENT_VAL)
+            return PRESENT;
+        else if(vid == UNTAGGED_VAL_OF10) {
+            // workaround for IVS sometimes sending OF1.0 untagged (0xFFFF) values
+            logger.warn("Warning: received OF1.0 untagged vlan value (0xFFFF) in OF1.3 VlanVid. Treating as UNTAGGED");
+            return UNTAGGED;
+        } else if ((vid & VALIDATION_MASK) != vid)
+            throw new IllegalArgumentException(String.format("Illegal VLAN value: %x", vid));
+        return new OFVlanVidMatch(vid);
+    }
+
+    public static OFVlanVidMatch ofVlanVid(VlanVid vid) {
+        return ofVlan(vid.getVlan());
+    }
+
+
+    public static OFVlanVidMatch ofVlan(int vlan) {
+        if( (vlan & VLAN_MASK) != vlan)
+            throw new IllegalArgumentException(String.format("Illegal VLAN value: %x", vlan));
+        return ofRawVid( (short) (vlan | PRESENT_VAL));
+    }
+
+    public static OFVlanVidMatch ofVlanOF10(short of10vlan) {
+        if(of10vlan == NONE_VAL) {
+            return NONE;
+        } else if(of10vlan == UNTAGGED_VAL_OF10) {
+            return UNTAGGED;
+        } else {
+            return ofVlan(of10vlan);
+        }
+    }
+
+    /** @return whether or not this VlanId has the present (0x1000) bit set */
+    public boolean isPresentBitSet() {
+       return (vid & PRESENT_VAL) != 0;
+    }
+
+    /** @return the actual VLAN tag this vid identifies */
+    public short getVlan() {
+        return (short) (vid & VLAN_MASK);
+    }
+
+    /** @return the VLAN tag this vid identifies, as a VlanVid object, if this
+     *  OFVlanVidMatch has the present bit set (i.e., identifies a tagged VLAN);
+     *  null otherwise.
+     */
+    @Nullable
+    public VlanVid getVlanVid() {
+        return isPresentBitSet() ? VlanVid.ofVlan((short) (vid & VLAN_MASK)) : null;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof OFVlanVidMatch))
+            return false;
+        OFVlanVidMatch other = (OFVlanVidMatch)obj;
+        if (other.vid != this.vid)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int prime = 13873;
+        return this.vid * prime;
+    }
+
+    @Override
+    public String toString() {
+        return "0x" + Integer.toHexString(vid);
+    }
+
+    public short getRawVid() {
+        return vid;
+    }
+
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+
+    volatile byte[] bytesCache = null;
+
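+    // Lazily serializes the vid in network byte order; the volatile field together with
+    // the synchronized null re-check is a double-checked-locking cache, so concurrent
+    // callers observe either null or the fully initialized two-byte array.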
+    public byte[] getBytes() {
+        if (bytesCache == null) {
+            synchronized (this) {
+                if (bytesCache == null) {
+                    bytesCache =
+                            new byte[] { (byte) ((vid >>> 8) & 0xFF),
+                                         (byte) ((vid >>> 0) & 0xFF) };
+                }
+            }
+        }
+        return bytesCache;
+    }
+
+    public void write2Bytes(ChannelBuffer c) {
+        c.writeShort(this.vid);
+    }
+
+    public void write2BytesOF10(ChannelBuffer c) {
+        c.writeShort(this.getVlan());
+    }
+
+    public static OFVlanVidMatch read2Bytes(ChannelBuffer c) throws OFParseError {
+        return OFVlanVidMatch.ofRawVid(c.readShort());
+    }
+
+    public static OFVlanVidMatch read2BytesOF10(ChannelBuffer c) throws OFParseError {
+        return OFVlanVidMatch.ofVlanOF10(c.readShort());
+    }
+
+    @Override
+    public OFVlanVidMatch applyMask(OFVlanVidMatch mask) {
+        return OFVlanVidMatch.ofRawVid((short)(this.vid & mask.vid));
+    }
+
+    @Override
+    public int compareTo(OFVlanVidMatch o) {
+        return Shorts.compare(vid, o.vid);
+    }
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putShort(vid);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFVlanVidMatchWithMask.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFVlanVidMatchWithMask.java
new file mode 100644
index 0000000..c91c28c
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/OFVlanVidMatchWithMask.java
@@ -0,0 +1,10 @@
+package org.projectfloodlight.openflow.types;
+
+public class OFVlanVidMatchWithMask extends Masked<OFVlanVidMatch> {
+    private OFVlanVidMatchWithMask(OFVlanVidMatch value, OFVlanVidMatch mask) {
+        super(value, mask);
+    }
+
+    /* a combination of Vlan Vid and mask that matches any tagged packet */
+    public final static OFVlanVidMatchWithMask ANY_TAGGED = new OFVlanVidMatchWithMask(OFVlanVidMatch.PRESENT, OFVlanVidMatch.PRESENT);
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/PortSpeed.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/PortSpeed.java
new file mode 100644
index 0000000..6affab8
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/PortSpeed.java
@@ -0,0 +1,33 @@
+package org.projectfloodlight.openflow.types;
+
+/**
+ * Represents the speed of a port
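+ *
+ * For example, {@code PortSpeed.SPEED_1GB.getSpeedBps()} evaluates to 1,000,000,000
+ * and {@code PortSpeed.max(SPEED_10GB, SPEED_1GB)} is {@code SPEED_10GB}.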
+ */
+public enum PortSpeed {
+    /** no speed set */
+    SPEED_NONE(0),
+    SPEED_10MB(10),
+    SPEED_100MB(100),
+    SPEED_1GB(1_000),
+    SPEED_10GB(10_000),
+    SPEED_40GB(40_000),
+    SPEED_100GB(100_000),
+    SPEED_1TB(1_000_000);
+
+    private long speedInBps;
+    private PortSpeed(int speedInMbps) {
+        this.speedInBps = speedInMbps * 1000L*1000L;
+    }
+
+    public long getSpeedBps() {
+        return this.speedInBps;
+    }
+
+    public static PortSpeed max(PortSpeed s1, PortSpeed s2) {
+        return (s1.getSpeedBps() > s2.getSpeedBps()) ? s1 : s2;
+    }
+
+    public static PortSpeed min(PortSpeed s1, PortSpeed s2) {
+        return (s1.getSpeedBps() < s2.getSpeedBps()) ? s1 : s2;
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/PrimitiveSinkable.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/PrimitiveSinkable.java
new file mode 100644
index 0000000..e50cb75
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/PrimitiveSinkable.java
@@ -0,0 +1,7 @@
+package org.projectfloodlight.openflow.types;
+
+import com.google.common.hash.PrimitiveSink;
+
+public interface PrimitiveSinkable {
+    public void putTo(PrimitiveSink sink);
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/TableId.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/TableId.java
new file mode 100644
index 0000000..950087d
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/TableId.java
@@ -0,0 +1,100 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Shorts;
+
+public class TableId implements OFValueType<TableId>, Comparable<TableId> {
+
+    final static int LENGTH = 1;
+
+    private static final short VALIDATION_MASK = 0x00FF;
+
+    private static final short ALL_VAL = 0x00FF;
+    private static final short NONE_VAL = 0x0000;
+    public static final TableId NONE = new TableId(NONE_VAL);
+
+    public static final TableId ALL = new TableId(ALL_VAL);
+    public static final TableId ZERO = NONE;
+
+    private final short id;
+
+    private TableId(short id) {
+        this.id = id;
+    }
+
+    public static TableId of(short id) {
+        switch(id) {
+            case NONE_VAL:
+                return NONE;
+            case ALL_VAL:
+                return ALL;
+            default:
+                if ((id & VALIDATION_MASK) != id)
+                    throw new IllegalArgumentException("Illegal Table id value: " + id);
+                return new TableId(id);
+        }
+    }
+
+    public static TableId of(int id) {
+        if((id & VALIDATION_MASK) != id)
+            throw new IllegalArgumentException("Illegal Table id value: "+id);
+        return of((short) id);
+    }
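+
+    // For instance, TableId.of(0xFF) returns the shared ALL instance and TableId.of(0)
+    // returns NONE (aliased as ZERO); values outside 0..255 are rejected.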
+
+    @Override
+    public String toString() {
+        return "0x" + Integer.toHexString(id);
+    }
+
+    public short getValue() {
+        return id;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    public void writeByte(ChannelBuffer c) {
+        c.writeByte(this.id);
+    }
+
+    public static TableId readByte(ChannelBuffer c) throws OFParseError {
+        return TableId.of(c.readUnsignedByte());
+    }
+
+    @Override
+    public TableId applyMask(TableId mask) {
+        return TableId.of((short)(this.id & mask.id));
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof TableId))
+            return false;
+        TableId other = (TableId)obj;
+        if (other.id != this.id)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int prime = 13873;
+        return this.id * prime;
+    }
+
+    @Override
+    public int compareTo(TableId other) {
+        return Shorts.compare(this.id, other.id);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putByte((byte) id);
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/TransportPort.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/TransportPort.java
new file mode 100644
index 0000000..01019b0
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/TransportPort.java
@@ -0,0 +1,96 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Ints;
+
+/**
+ * Represents L4 (Transport Layer) port (TCP, UDP, etc.)
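+ *
+ * Valid port numbers range from 0 to 0xFFFF; for instance, {@code TransportPort.of(80)}
+ * is accepted, while out-of-range values trigger an IllegalArgumentException.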
+ *
+ * @author Yotam Harchol (yotam.harchol@bigswitch.com)
+ */
+public class TransportPort implements OFValueType<TransportPort> {
+
+    static final int LENGTH = 2;
+    static final int MAX_PORT = 0xFFFF;
+    static final int MIN_PORT = 0;
+
+    private final static int NONE_VAL = 0;
+    public final static TransportPort NONE = new TransportPort(NONE_VAL);
+
+    public static final TransportPort NO_MASK = new TransportPort(0xFFFFFFFF);
+    public static final TransportPort FULL_MASK = TransportPort.of(0x0);
+
+    private final int port;
+
+    private TransportPort(int port) {
+        this.port = port;
+    }
+
+    public static TransportPort of(int port) {
+        if(port == NONE_VAL)
+            return NONE;
+        else if (port < MIN_PORT || port > MAX_PORT) {
+            throw new IllegalArgumentException("Illegal transport layer port number: " + port);
+        }
+        return new TransportPort(port);
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    public int getPort() {
+        return port;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof TransportPort))
+            return false;
+        TransportPort other = (TransportPort)obj;
+        if (other.port != this.port)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 59;
+        int result = 1;
+        result = prime * result + port;
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return Integer.toString(port);
+    }
+
+    public void write2Bytes(ChannelBuffer c) {
+        c.writeShort(this.port);
+    }
+
+    public static TransportPort read2Bytes(ChannelBuffer c) throws OFParseError {
+        return TransportPort.of((c.readUnsignedShort() & 0x0FFFF));
+    }
+
+    @Override
+    public TransportPort applyMask(TransportPort mask) {
+        return TransportPort.of(this.port & mask.port);
+    }
+
+    @Override
+    public int compareTo(TransportPort o) {
+        return Ints.compare(port, o.port);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putShort((short) port);
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U16.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U16.java
new file mode 100644
index 0000000..9de7e14
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U16.java
@@ -0,0 +1,126 @@
+/**
+ *    Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
+ *    University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFMessageReader;
+import org.projectfloodlight.openflow.protocol.Writeable;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Ints;
+
+public class U16 implements Writeable, OFValueType<U16> {
+    private final static short ZERO_VAL = 0;
+    public final static U16 ZERO = new U16(ZERO_VAL);
+
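+    // f() widens a raw short to its unsigned int value; t() truncates an int back to
+    // the raw (possibly negative) short.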
+    public static int f(final short i) {
+        return i & 0xffff;
+    }
+
+    public static short t(final int l) {
+        return (short) l;
+    }
+
+    private final short raw;
+
+    private U16(short raw) {
+        this.raw = raw;
+    }
+
+    public static final U16 of(int value) {
+        return ofRaw(t(value));
+    }
+
+    public static final U16 ofRaw(short raw) {
+        if(raw == ZERO_VAL)
+            return ZERO;
+        return new U16(raw);
+    }
+
+    public int getValue() {
+        return f(raw);
+    }
+
+    public short getRaw() {
+        return raw;
+    }
+
+    @Override
+    public String toString() {
+        return Integer.toString(f(raw));
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + raw;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        U16 other = (U16) obj;
+        if (raw != other.raw)
+            return false;
+        return true;
+    }
+
+
+    @Override
+    public void writeTo(ChannelBuffer bb) {
+        bb.writeShort(raw);
+    }
+
+
+    public final static Reader READER = new Reader();
+
+    private static class Reader implements OFMessageReader<U16> {
+        @Override
+        public U16 readFrom(ChannelBuffer bb) throws OFParseError {
+            return ofRaw(bb.readShort());
+        }
+    }
+
+    @Override
+    public int getLength() {
+        return 2;
+    }
+
+    @Override
+    public U16 applyMask(U16 mask) {
+        return ofRaw( (short) (raw & mask.raw));
+    }
+
+    @Override
+    public int compareTo(U16 o) {
+        return Ints.compare(f(raw), f(o.raw));
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putShort(raw);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U32.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U32.java
new file mode 100644
index 0000000..7f53374
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U32.java
@@ -0,0 +1,130 @@
+/**
+ *    Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
+ *    University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFMessageReader;
+import org.projectfloodlight.openflow.protocol.Writeable;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+public class U32 implements Writeable, OFValueType<U32> {
+    private final static int ZERO_VAL = 0;
+    public final static U32 ZERO = new U32(ZERO_VAL);
+
+    private static final int NO_MASK_VAL = 0xFFffFFff;
+    public final static U32 NO_MASK = new U32(NO_MASK_VAL);
+    public static final U32 FULL_MASK = ZERO;
+
+    private final int raw;
+
+    private U32(int raw) {
+        this.raw = raw;
+    }
+
+    public static U32 of(long value) {
+        return ofRaw(U32.t(value));
+    }
+
+    public static U32 ofRaw(int raw) {
+        if(raw == ZERO_VAL)
+            return ZERO;
+        if(raw == NO_MASK_VAL)
+            return NO_MASK;
+        return new U32(raw);
+    }
+
+    public long getValue() {
+        return f(raw);
+    }
+
+    public int getRaw() {
+        return raw;
+    }
+
+    @Override
+    public String toString() {
+        return "" + f(raw);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + raw;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        U32 other = (U32) obj;
+        if (raw != other.raw)
+            return false;
+
+        return true;
+    }
+
+    public static long f(final int i) {
+        return i & 0xffffffffL;
+    }
+
+    public static int t(final long l) {
+        return (int) l;
+    }
+
+    @Override
+    public void writeTo(ChannelBuffer bb) {
+        bb.writeInt(raw);
+    }
+
+    public final static Reader READER = new Reader();
+
+    private static class Reader implements OFMessageReader<U32> {
+        @Override
+        public U32 readFrom(ChannelBuffer bb) throws OFParseError {
+            return new U32(bb.readInt());
+        }
+    }
+
+    @Override
+    public int getLength() {
+        return 4;
+    }
+
+    @Override
+    public U32 applyMask(U32 mask) {
+        return ofRaw(raw & mask.raw);
+    }
+
+    @Override
+    public int compareTo(U32 o) {
+        return UnsignedInts.compare(raw, o.raw);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(raw);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U64.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U64.java
new file mode 100644
index 0000000..f480c47
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U64.java
@@ -0,0 +1,139 @@
+/**
+ *    Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
+ *    University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package org.projectfloodlight.openflow.types;
+
+import java.math.BigInteger;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFMessageReader;
+import org.projectfloodlight.openflow.protocol.Writeable;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedLongs;
+
+public class U64 implements Writeable, OFValueType<U64> {
+    private static final long UNSIGNED_MASK = 0x7fffffffffffffffL;
+    private final static long ZERO_VAL = 0;
+    public final static U64 ZERO = new U64(ZERO_VAL);
+
+    private final long raw;
+
+    protected U64(final long raw) {
+        this.raw = raw;
+    }
+
+    public static U64 of(long raw) {
+        return ofRaw(raw);
+    }
+
+    public static U64 ofRaw(final long raw) {
+        if(raw == ZERO_VAL)
+            return ZERO;
+        return new U64(raw);
+    }
+
+    public static U64 parseHex(String hex) {
+        return new U64(new BigInteger(hex, 16).longValue());
+    }
+
+    public long getValue() {
+        return raw;
+    }
+
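+    // Converts the raw signed long to its unsigned BigInteger value: mask off the sign
+    // bit first, then re-set bit 63 if the raw value was negative.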
+    public BigInteger getBigInteger() {
+        BigInteger bigInt = BigInteger.valueOf(raw & UNSIGNED_MASK);
+        if (raw < 0) {
+          bigInt = bigInt.setBit(Long.SIZE - 1);
+        }
+        return bigInt;
+    }
+
+    @Override
+    public String toString() {
+        return getBigInteger().toString();
+    }
+
+    public static BigInteger f(final long value) {
+        BigInteger bigInt = BigInteger.valueOf(value & UNSIGNED_MASK);
+        if (value < 0) {
+          bigInt = bigInt.setBit(Long.SIZE - 1);
+        }
+        return bigInt;
+    }
+
+    public static long t(final BigInteger l) {
+        return l.longValue();
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + (int) (raw ^ (raw >>> 32));
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        U64 other = (U64) obj;
+        if (raw != other.raw)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int getLength() {
+        return 8;
+    }
+
+    @Override
+    public U64 applyMask(U64 mask) {
+        return ofRaw(raw & mask.raw);
+    }
+
+    @Override
+    public void writeTo(ChannelBuffer bb) {
+        bb.writeLong(raw);
+    }
+
+    @Override
+    public int compareTo(U64 o) {
+        return UnsignedLongs.compare(raw, o.raw);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putLong(raw);
+    }
+
+    public final static Reader READER = new Reader();
+
+    private static class Reader implements OFMessageReader<U64> {
+        @Override
+        public U64 readFrom(ChannelBuffer bb) throws OFParseError {
+            return U64.ofRaw(bb.readLong());
+        }
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U8.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U8.java
new file mode 100644
index 0000000..c644599
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/U8.java
@@ -0,0 +1,133 @@
+/**
+ *    Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
+ *    University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFMessageReader;
+import org.projectfloodlight.openflow.protocol.Writeable;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedBytes;
+
+public class U8 implements Writeable, OFValueType<U8> {
+    private final static byte ZERO_VAL = 0;
+    public final static U8 ZERO = new U8(ZERO_VAL);
+
+    private static final byte NO_MASK_VAL = (byte) 0xFF;
+    public static final U8 NO_MASK = new U8(NO_MASK_VAL);
+    public static final U8 FULL_MASK = ZERO;
+
+    private final byte raw;
+
+    private U8(byte raw) {
+        this.raw = raw;
+    }
+
+    public static final U8 of(short value) {
+        if(value == ZERO_VAL)
+            return ZERO;
+        if(value == NO_MASK_VAL)
+            return NO_MASK;
+
+        return new U8(t(value));
+    }
+
+    public static final U8 ofRaw(byte value) {
+        return new U8(value);
+    }
+
+    public short getValue() {
+        return f(raw);
+    }
+
+    public byte getRaw() {
+        return raw;
+    }
+
+    @Override
+    public String toString() {
+        return "" + f(raw);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + raw;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        U8 other = (U8) obj;
+        if (raw != other.raw)
+            return false;
+        return true;
+    }
+
+
+    @Override
+    public void writeTo(ChannelBuffer bb) {
+        bb.writeByte(raw);
+    }
+
+    public static short f(final byte i) {
+        return (short) (i & 0xff);
+    }
+
+    public static byte t(final short l) {
+        return (byte) l;
+    }
+
+
+    public final static Reader READER = new Reader();
+
+    private static class Reader implements OFMessageReader<U8> {
+        @Override
+        public U8 readFrom(ChannelBuffer bb) throws OFParseError {
+            return new U8(bb.readByte());
+        }
+    }
+
+    @Override
+    public int getLength() {
+        return 1;
+    }
+
+    @Override
+    public U8 applyMask(U8 mask) {
+        return ofRaw( (byte) (raw & mask.raw));
+    }
+
+    @Override
+    public int compareTo(U8 o) {
+        return UnsignedBytes.compare(raw, o.raw);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putByte(raw);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/VRF.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/VRF.java
new file mode 100644
index 0000000..b742da5
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/VRF.java
@@ -0,0 +1,85 @@
+package org.projectfloodlight.openflow.types;
+
+import javax.annotation.concurrent.Immutable;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedInts;
+
+@Immutable
+public class VRF implements OFValueType<VRF> {
+    static final int LENGTH = 4;
+    private final int rawValue;
+
+    public static final VRF ZERO = VRF.of(0x0);
+    public static final VRF NO_MASK = VRF.of(0xFFFFFFFF);
+    public static final VRF FULL_MASK = VRF.of(0x00000000);
+
+    private VRF(final int rawValue) {
+        this.rawValue = rawValue;
+    }
+
+    public static VRF of(final int raw) {
+        return new VRF(raw);
+    }
+
+    public int getInt() {
+        return rawValue;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + rawValue;
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        VRF other = (VRF) obj;
+        if (rawValue != other.rawValue)
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return Integer.toString(rawValue);
+    }
+
+    public void write4Bytes(ChannelBuffer c) {
+        c.writeInt(rawValue);
+    }
+
+    public static VRF read4Bytes(ChannelBuffer c) {
+        return VRF.of(c.readInt());
+    }
+
+    @Override
+    public VRF applyMask(VRF mask) {
+        return VRF.of(this.rawValue & mask.rawValue);
+    }
+
+    @Override
+    public int compareTo(VRF o) {
+        return UnsignedInts.compare(rawValue, o.rawValue);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putInt(rawValue);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/VlanPcp.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/VlanPcp.java
new file mode 100644
index 0000000..cbb7004
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/VlanPcp.java
@@ -0,0 +1,82 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.UnsignedBytes;
+
+public class VlanPcp implements OFValueType<VlanPcp> {
+
+    private static final byte VALIDATION_MASK = 0x07;
+    private static final byte NONE_VAL = 0x00;
+    static final int LENGTH = 1;
+
+    private final byte pcp;
+
+    public static final VlanPcp NONE = new VlanPcp(NONE_VAL);
+    public static final VlanPcp NO_MASK = new VlanPcp((byte)0xFF);
+    public static final VlanPcp FULL_MASK = VlanPcp.of((byte)0x0);
+
+    private VlanPcp(byte pcp) {
+        this.pcp = pcp;
+    }
+
+    public static VlanPcp of(byte pcp) {
+        if ((pcp & VALIDATION_MASK) != pcp)
+            throw new IllegalArgumentException("Illegal VLAN PCP value: " + pcp);
+        return new VlanPcp(pcp);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof VlanPcp))
+            return false;
+        VlanPcp other = (VlanPcp)obj;
+        if (other.pcp != this.pcp)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int prime = 20173;
+        return this.pcp * prime;
+    }
+
+    @Override
+    public String toString() {
+        return "0x" + Integer.toHexString(pcp);
+    }
+
+    public byte getValue() {
+        return pcp;
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    public void writeByte(ChannelBuffer c) {
+        c.writeByte(this.pcp);
+    }
+
+    public static VlanPcp readByte(ChannelBuffer c) throws OFParseError {
+        return VlanPcp.of((byte)(c.readUnsignedByte() & 0xFF));
+    }
+
+    @Override
+    public VlanPcp applyMask(VlanPcp mask) {
+        return VlanPcp.of((byte)(this.pcp & mask.pcp));
+    }
+
+    @Override
+    public int compareTo(VlanPcp o) {
+        return UnsignedBytes.compare(pcp, o.pcp);
+    }
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putByte(pcp);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/VlanVid.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/VlanVid.java
new file mode 100644
index 0000000..8337eb6
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/types/VlanVid.java
@@ -0,0 +1,111 @@
+package org.projectfloodlight.openflow.types;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.hash.PrimitiveSink;
+import com.google.common.primitives.Shorts;
+
+/** Represents an 802.1Q Vlan VID (12 bits).
+ *
+ * @author Andreas Wundsam <andreas.wundsam@bigswitch.com>
+ *
+ */
+public class VlanVid implements OFValueType<VlanVid> {
+
+    private static final short VALIDATION_MASK = 0x0FFF;
+    private static final short ZERO_VAL = 0x0000;
+    final static int LENGTH = 2;
+
+    /** this value means 'not set' in OF1.0 (e.g., in a match). not used elsewhere */
+    public static final VlanVid ZERO = new VlanVid(ZERO_VAL);
+
+    /** for use with masking operations */
+    public static final VlanVid NO_MASK = new VlanVid((short)0xFFFF);
+    public static final VlanVid FULL_MASK = ZERO;
+
+    private final short vid;
+
+    private VlanVid(short vid) {
+        this.vid = vid;
+    }
+
+    public static VlanVid ofVlan(int vid) {
+        if ((vid & VALIDATION_MASK) != vid)
+            throw new IllegalArgumentException(String.format("Illegal VLAN value: %x", vid));
+        return new VlanVid((short) vid);
+    }
+
+    /** @return the actual VLAN tag this vid identifies */
+    public short getVlan() {
+        return vid;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof VlanVid))
+            return false;
+        VlanVid other = (VlanVid)obj;
+        if (other.vid != this.vid)
+            return false;
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int prime = 13873;
+        return this.vid * prime;
+    }
+
+    @Override
+    public String toString() {
+        return "0x" + Integer.toHexString(vid);
+    }
+
+    @Override
+    public int getLength() {
+        return LENGTH;
+    }
+
+    volatile byte[] bytesCache = null;
+
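+    // Same double-checked-locking byte cache pattern as OFVlanVidMatch.getBytes().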
+    public byte[] getBytes() {
+        if (bytesCache == null) {
+            synchronized (this) {
+                if (bytesCache == null) {
+                    bytesCache =
+                            new byte[] { (byte) ((vid >>> 8) & 0xFF),
+                                         (byte) ((vid >>> 0) & 0xFF) };
+                }
+            }
+        }
+        return bytesCache;
+    }
+
+    public void write2Bytes(ChannelBuffer c) {
+        c.writeShort(this.vid);
+    }
+
+    public void write2BytesOF10(ChannelBuffer c) {
+        c.writeShort(this.getVlan());
+    }
+
+    public static VlanVid read2Bytes(ChannelBuffer c) throws OFParseError {
+        return VlanVid.ofVlan(c.readShort());
+    }
+
+    @Override
+    public VlanVid applyMask(VlanVid mask) {
+        return VlanVid.ofVlan((short)(this.vid & mask.vid));
+    }
+
+    @Override
+    public int compareTo(VlanVid o) {
+        return Shorts.compare(vid, o.vid);
+    }
+
+    @Override
+    public void putTo(PrimitiveSink sink) {
+        sink.putShort(vid);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/ActionUtils.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/ActionUtils.java
new file mode 100644
index 0000000..e0553a9
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/ActionUtils.java
@@ -0,0 +1,43 @@
+package org.projectfloodlight.openflow.util;
+
+import java.util.List;
+
+import org.projectfloodlight.openflow.protocol.OFFlowMod;
+import org.projectfloodlight.openflow.protocol.OFFlowStatsEntry;
+import org.projectfloodlight.openflow.protocol.OFInstructionType;
+import org.projectfloodlight.openflow.protocol.OFVersion;
+import org.projectfloodlight.openflow.protocol.action.OFAction;
+import org.projectfloodlight.openflow.protocol.instruction.OFInstruction;
+import org.projectfloodlight.openflow.protocol.instruction.OFInstructionApplyActions;
+
+import com.google.common.collect.ImmutableList;
+
+public class ActionUtils {
+    private ActionUtils() {}
+
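+    /** Extracts the action list of a flow stats entry: returned directly for OF1.0,
+     *  otherwise taken from the APPLY_ACTIONS instruction (empty list if absent). */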
+    public static List<OFAction> getActions(OFFlowStatsEntry e) {
+        if(e.getVersion() == OFVersion.OF_10) {
+            return e.getActions();
+        } else {
+            for(OFInstruction i: e.getInstructions()) {
+                if(i.getType() == OFInstructionType.APPLY_ACTIONS) {
+                    return ((OFInstructionApplyActions) i).getActions();
+                }
+            }
+            return ImmutableList.of();
+        }
+    }
+
+    public static List<OFAction> getActions(OFFlowMod e) {
+        if(e.getVersion() == OFVersion.OF_10) {
+            return e.getActions();
+        } else {
+            for(OFInstruction i: e.getInstructions()) {
+                if(i.getType() == OFInstructionType.APPLY_ACTIONS) {
+                    return ((OFInstructionApplyActions) i).getActions();
+                }
+            }
+            return ImmutableList.of();
+        }
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/ChannelUtils.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/ChannelUtils.java
new file mode 100644
index 0000000..1a1ac6a
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/ChannelUtils.java
@@ -0,0 +1,80 @@
+package org.projectfloodlight.openflow.util;
+
+import java.util.List;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+import org.projectfloodlight.openflow.protocol.OFMessageReader;
+import org.projectfloodlight.openflow.protocol.Writeable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
+
+/**
+ * Collection of helper functions for reading and writing into ChannelBuffers
+ *
+ * @author capveg
+ */
+
+public class ChannelUtils {
+    private static final Logger logger = LoggerFactory.getLogger(ChannelUtils.class);
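+
+    /** Reads {@code length} bytes and interprets them as a NUL-padded US-ASCII string,
+     *  returning only the portion before the first zero byte. */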
+    public static String readFixedLengthString(ChannelBuffer bb, int length) {
+        byte[] dst = new byte[length];
+        bb.readBytes(dst, 0, length);
+        int validLength = 0;
+        for (validLength = 0; validLength < length; validLength++) {
+            if (dst[validLength] == 0)
+                break;
+        }
+        return new String(dst, 0, validLength, Charsets.US_ASCII);
+    }
+
+    public static void writeFixedLengthString(ChannelBuffer bb, String string,
+            int length) {
+        int l = string.length();
+        if (l > length) {
+            throw new IllegalArgumentException("Error writing string: length="
+                    + l + " > max Length=" + length);
+        }
+        bb.writeBytes(string.getBytes(Charsets.US_ASCII));
+        if (l < length) {
+            bb.writeZero(length - l);
+        }
+    }
+
+    static public byte[] readBytes(final ChannelBuffer bb, final int length) {
+        byte byteArray[] = new byte[length];
+        bb.readBytes(byteArray);
+        return byteArray;
+    }
+
+    static public void writeBytes(final ChannelBuffer bb,
+            final byte byteArray[]) {
+        bb.writeBytes(byteArray);
+    }
+
+    public static <T> List<T> readList(ChannelBuffer bb, int length, OFMessageReader<T> reader) throws OFParseError {
+        int end = bb.readerIndex() + length;
+        Builder<T> builder = ImmutableList.<T>builder();
+        if(logger.isTraceEnabled())
+            logger.trace("readList(length={}, reader={})", length, reader.getClass());
+        while(bb.readerIndex() < end) {
+            T read = reader.readFrom(bb);
+            if(logger.isTraceEnabled())
+                logger.trace("readList: read={}, left={}", read, end - bb.readerIndex());
+            builder.add(read);
+        }
+        if(bb.readerIndex() != end) {
+            throw new IllegalStateException("Overread length: length="+length + " overread by "+ (bb.readerIndex() - end) + " reader: "+reader);
+        }
+        return builder.build();
+    }
+
+    public static void writeList(ChannelBuffer bb, List<? extends Writeable> writeables) {
+        for(Writeable w: writeables)
+            w.writeTo(bb);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/FunnelUtils.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/FunnelUtils.java
new file mode 100644
index 0000000..f62d7f9
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/FunnelUtils.java
@@ -0,0 +1,14 @@
+package org.projectfloodlight.openflow.util;
+
+import java.util.List;
+
+import org.projectfloodlight.openflow.types.PrimitiveSinkable;
+
+import com.google.common.hash.PrimitiveSink;
+
+public class FunnelUtils {
+    public static void putList(List<? extends PrimitiveSinkable> sinkables, PrimitiveSink sink) {
+        for(PrimitiveSinkable p: sinkables)
+            p.putTo(sink);
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/HexString.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/HexString.java
new file mode 100644
index 0000000..ddf0f25
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/HexString.java
@@ -0,0 +1,98 @@
+/**
+ *    Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
+ *    University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package org.projectfloodlight.openflow.util;
+
+import java.math.BigInteger;
+
+import org.projectfloodlight.openflow.types.U8;
+
+public class HexString {
+    /**
+     * Convert an array of bytes to a ':'-separated hex string
+     *
+     * @param bytes the bytes to convert
+     * @return a hex string such as "0f:ca:fe:de:ad:be:ef"
+     */
+    public static String toHexString(final byte[] bytes) {
+        int i;
+        String ret = "";
+        String tmp;
+        for (i = 0; i < bytes.length; i++) {
+            if (i > 0)
+                ret += ":";
+            tmp = Integer.toHexString(U8.f(bytes[i]));
+            if (tmp.length() == 1)
+                ret += "0";
+            ret += tmp;
+        }
+        return ret;
+    }
+
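+    /** Convert a long to a ':'-separated hex string zero-padded to {@code padTo} bytes;
+     *  for example, {@code toHexString(0xcafeL, 8)} yields "00:00:00:00:00:00:ca:fe". */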
+    public static String toHexString(final long val, final int padTo) {
+        char arr[] = Long.toHexString(val).toCharArray();
+        String ret = "";
+        // prepend the right number of leading zeros
+        int i = 0;
+        for (; i < (padTo * 2 - arr.length); i++) {
+            ret += "0";
+            if ((i % 2) != 0)
+                ret += ":";
+        }
+        for (int j = 0; j < arr.length; j++) {
+            ret += arr[j];
+            if ((((i + j) % 2) != 0) && (j < (arr.length - 1)))
+                ret += ":";
+        }
+        return ret;
+    }
+
+    public static String toHexString(final long val) {
+        return toHexString(val, 8);
+    }
+
+    /**
+     * Convert a ':'-separated string of hex values into an array of bytes
+     *
+     * @param values
+     *            a hex string such as "0f:ca:fe:de:ad:be:ef"
+     * @return the corresponding byte array
+     * @throws NumberFormatException
+     *             if the string cannot be parsed
+     */
+    public static byte[] fromHexString(final String values) throws NumberFormatException {
+        String[] octets = values.split(":");
+        byte[] ret = new byte[octets.length];
+
+        for (int i = 0; i < octets.length; i++) {
+            if (octets[i].length() > 2)
+                throw new NumberFormatException("Invalid octet length");
+            ret[i] = Integer.valueOf(octets[i], 16).byteValue();
+        }
+        return ret;
+    }
+
+    public static long toLong(final String values) throws NumberFormatException {
+        // Long.parseLong() can't handle HexStrings with MSB set. Sigh.
+        BigInteger bi = new BigInteger(values.replaceAll(":", ""), 16);
+        if (bi.bitLength() > 64)
+            throw new NumberFormatException("Input string too big to fit in long: "
+                    + values);
+        return bi.longValue();
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/LRULinkedHashMap.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/LRULinkedHashMap.java
new file mode 100644
index 0000000..7798e67
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/LRULinkedHashMap.java
@@ -0,0 +1,42 @@
+/**
+ *    Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
+ *    University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package org.projectfloodlight.openflow.util;
+
+import java.util.LinkedHashMap;
+
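+/**
+ * A LinkedHashMap kept in access order that evicts its eldest entry once the map grows
+ * beyond {@code maximumCapacity}, i.e., a simple bounded LRU cache.
+ */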
+public class LRULinkedHashMap<K, V> extends LinkedHashMap<K, V> {
+    private static final long serialVersionUID = -2964986094089626647L;
+    protected int maximumCapacity;
+
+    public LRULinkedHashMap(final int initialCapacity, final int maximumCapacity) {
+        super(initialCapacity, 0.75f, true);
+        this.maximumCapacity = maximumCapacity;
+    }
+
+    public LRULinkedHashMap(final int maximumCapacity) {
+        super(16, 0.75f, true);
+        this.maximumCapacity = maximumCapacity;
+    }
+
+    @Override
+    protected boolean removeEldestEntry(final java.util.Map.Entry<K, V> eldest) {
+        if (this.size() > maximumCapacity)
+            return true;
+        return false;
+    }
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/LengthCountingPseudoChannelBuffer.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/LengthCountingPseudoChannelBuffer.java
new file mode 100644
index 0000000..48362da
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/LengthCountingPseudoChannelBuffer.java
@@ -0,0 +1,673 @@
+package org.projectfloodlight.openflow.util;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.GatheringByteChannel;
+import java.nio.channels.ScatteringByteChannel;
+import java.nio.charset.Charset;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBufferFactory;
+import org.jboss.netty.buffer.ChannelBufferIndexFinder;
+
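+/**
+ * A write-only ChannelBuffer stand-in that only advances its writer index on every
+ * write call, allowing callers to compute the serialized length of a message without
+ * allocating an actual buffer. Read and absolute get operations throw
+ * UnsupportedOperationException; absolute set operations are no-ops.
+ */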
+public class LengthCountingPseudoChannelBuffer implements ChannelBuffer {
+
+    int writerIndex = 0;
+    private int markedWriterIndex;
+
+    @Override
+    public ChannelBufferFactory factory() {
+        return null;
+    }
+
+    @Override
+    public int capacity() {
+        return Integer.MAX_VALUE;
+    }
+
+    @Override
+    public ByteOrder order() {
+        return ByteOrder.BIG_ENDIAN;
+    }
+
+    @Override
+    public boolean isDirect() {
+        return true;
+    }
+
+    @Override
+    public int readerIndex() {
+        return 0;
+    }
+
+    @Override
+    public void readerIndex(int readerIndex) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int writerIndex() {
+        return writerIndex;
+    }
+
+    @Override
+    public void writerIndex(int writerIndex) {
+        this.writerIndex = writerIndex;
+    }
+
+    @Override
+    public void setIndex(int readerIndex, int writerIndex) {
+        if(readerIndex != 0)
+            throw new UnsupportedOperationException();
+        this.writerIndex = writerIndex;
+    }
+
+    @Override
+    public int readableBytes() {
+        return writerIndex;
+    }
+
+    @Override
+    public int writableBytes() {
+        return Integer.MAX_VALUE - writerIndex;
+    }
+
+    @Override
+    public boolean readable() {
+        return writerIndex > 0;
+    }
+
+    @Override
+    public boolean writable() {
+        return writerIndex < Integer.MAX_VALUE;
+    }
+
+    @Override
+    public void clear() {
+        writerIndex = 0;
+
+    }
+
+    @Override
+    public void markReaderIndex() {
+    }
+
+    @Override
+    public void resetReaderIndex() {
+    }
+
+    @Override
+    public void markWriterIndex() {
+        markedWriterIndex = writerIndex;
+    }
+
+    @Override
+    public void resetWriterIndex() {
+        writerIndex = markedWriterIndex;
+    }
+
+    @Override
+    public void discardReadBytes() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void ensureWritableBytes(int writableBytes) {
+        if(!((Integer.MAX_VALUE - writableBytes) > writerIndex))
+            throw new IllegalStateException();
+    }
+
+    @Override
+    public byte getByte(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public short getUnsignedByte(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public short getShort(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int getUnsignedShort(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int getMedium(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int getUnsignedMedium(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int getInt(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long getUnsignedInt(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long getLong(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public char getChar(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public float getFloat(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public double getDouble(int index) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void getBytes(int index, ChannelBuffer dst) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void getBytes(int index, ChannelBuffer dst, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void getBytes(int index, ChannelBuffer dst, int dstIndex, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void getBytes(int index, byte[] dst) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void getBytes(int index, byte[] dst, int dstIndex, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void getBytes(int index, ByteBuffer dst) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void getBytes(int index, OutputStream out, int length)
+            throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int getBytes(int index, GatheringByteChannel out, int length)
+            throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void setByte(int index, int value) {
+    }
+
+    @Override
+    public void setShort(int index, int value) {
+    }
+
+    @Override
+    public void setMedium(int index, int value) {
+    }
+
+    @Override
+    public void setInt(int index, int value) {
+    }
+
+    @Override
+    public void setLong(int index, long value) {
+    }
+
+    @Override
+    public void setChar(int index, int value) {
+    }
+
+    @Override
+    public void setFloat(int index, float value) {
+    }
+
+    @Override
+    public void setDouble(int index, double value) {
+    }
+
+    @Override
+    public void setBytes(int index, ChannelBuffer src) {
+    }
+
+    @Override
+    public void setBytes(int index, ChannelBuffer src, int length) {
+    }
+
+    @Override
+    public void setBytes(int index, ChannelBuffer src, int srcIndex, int length) {
+    }
+
+    @Override
+    public void setBytes(int index, byte[] src) {
+    }
+
+    @Override
+    public void setBytes(int index, byte[] src, int srcIndex, int length) {
+    }
+
+    @Override
+    public void setBytes(int index, ByteBuffer src) {
+
+    }
+
+    @Override
+    public int setBytes(int index, InputStream in, int length)
+            throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int setBytes(int index, ScatteringByteChannel in, int length)
+            throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void setZero(int index, int length) {
+    }
+
+    @Override
+    public byte readByte() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public short readUnsignedByte() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public short readShort() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int readUnsignedShort() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int readMedium() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int readUnsignedMedium() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int readInt() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long readUnsignedInt() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long readLong() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public char readChar() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public float readFloat() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public double readDouble() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ChannelBuffer readBytes(int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    @Deprecated
+    public ChannelBuffer readBytes(ChannelBufferIndexFinder indexFinder) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ChannelBuffer readSlice(int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    @Deprecated
+    public ChannelBuffer readSlice(ChannelBufferIndexFinder indexFinder) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void readBytes(ChannelBuffer dst) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void readBytes(ChannelBuffer dst, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void readBytes(ChannelBuffer dst, int dstIndex, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void readBytes(byte[] dst) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void readBytes(byte[] dst, int dstIndex, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void readBytes(ByteBuffer dst) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void readBytes(OutputStream out, int length) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int readBytes(GatheringByteChannel out, int length)
+            throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void skipBytes(int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    @Deprecated
+    public int skipBytes(ChannelBufferIndexFinder indexFinder) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void writeByte(int value) {
+        writerIndex++;
+    }
+
+    @Override
+    public void writeShort(int value) {
+        writerIndex += 2;
+    }
+
+    @Override
+    public void writeMedium(int value) {
+        writerIndex += 3;
+    }
+
+    @Override
+    public void writeInt(int value) {
+        writerIndex += 4;
+    }
+
+    @Override
+    public void writeLong(long value) {
+        writerIndex += 8;
+    }
+
+    @Override
+    public void writeChar(int value) {
+        writeShort(value);
+    }
+
+    @Override
+    public void writeFloat(float value) {
+        writeInt(Float.floatToIntBits(value));
+    }
+
+    @Override
+    public void writeDouble(double value) {
+        writeLong(Double.doubleToLongBits(value));
+
+    }
+
+    @Override
+    public void writeBytes(ChannelBuffer src) {
+        writerIndex += src.readableBytes();
+
+    }
+
+    @Override
+    public void writeBytes(ChannelBuffer src, int length) {
+        // count only the requested number of bytes, not everything readable in src
+        writerIndex += length;
+    }
+
+    @Override
+    public void writeBytes(ChannelBuffer src, int srcIndex, int length) {
+        writerIndex += length;
+    }
+
+    @Override
+    public void writeBytes(byte[] src) {
+        writerIndex += src.length;
+
+    }
+
+    @Override
+    public void writeBytes(byte[] src, int srcIndex, int length) {
+        writerIndex += length;
+    }
+
+    @Override
+    public void writeBytes(ByteBuffer src) {
+        writerIndex += src.remaining();
+
+    }
+
+    @Override
+    public int writeBytes(InputStream in, int length) throws IOException {
+        writerIndex += length;
+        return length;
+    }
+
+    @Override
+    public int writeBytes(ScatteringByteChannel in, int length)
+            throws IOException {
+        writerIndex += length;
+        return length;
+    }
+
+    @Override
+    public void writeZero(int length) {
+        writerIndex += length;
+
+    }
+
+    @Override
+    public int indexOf(int fromIndex, int toIndex, byte value) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int indexOf(int fromIndex, int toIndex,
+            ChannelBufferIndexFinder indexFinder) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int bytesBefore(byte value) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int bytesBefore(ChannelBufferIndexFinder indexFinder) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int bytesBefore(int length, byte value) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int bytesBefore(int length, ChannelBufferIndexFinder indexFinder) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int bytesBefore(int index, int length, byte value) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int bytesBefore(int index, int length,
+            ChannelBufferIndexFinder indexFinder) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ChannelBuffer copy() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ChannelBuffer copy(int index, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ChannelBuffer slice() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ChannelBuffer slice(int index, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ChannelBuffer duplicate() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ByteBuffer toByteBuffer() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ByteBuffer toByteBuffer(int index, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ByteBuffer[] toByteBuffers() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ByteBuffer[] toByteBuffers(int index, int length) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean hasArray() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public byte[] array() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int arrayOffset() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public String toString(Charset charset) {
+        return "LengthCountingPseudoChannelBuffer(length="+writerIndex+")";
+    }
+
+    @Override
+    public String toString(int index, int length, Charset charset) {
+        return toString();
+    }
+
+    @Override
+    @Deprecated
+    public String toString(String charsetName) {
+        return toString();
+    }
+
+    @Override
+    @Deprecated
+    public String toString(String charsetName,
+            ChannelBufferIndexFinder terminatorFinder) {
+        return toString();
+    }
+
+    @Override
+    @Deprecated
+    public String toString(int index, int length, String charsetName) {
+        return toString();
+    }
+
+    @Override
+    @Deprecated
+    public String toString(int index, int length, String charsetName,
+            ChannelBufferIndexFinder terminatorFinder) {
+        return toString();
+    }
+
+    @Override
+    public int compareTo(ChannelBuffer buffer) {
+        throw new UnsupportedOperationException();
+
+    }
+
+}
diff --git a/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/StringByteSerializer.java b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/StringByteSerializer.java
new file mode 100644
index 0000000..6949fb2
--- /dev/null
+++ b/java_gen/pre-written/src/main/java/org/projectfloodlight/openflow/util/StringByteSerializer.java
@@ -0,0 +1,58 @@
+/**
+ *    Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
+ *    University
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ **/
+
+package org.projectfloodlight.openflow.util;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.Charset;
+import java.util.Arrays;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+public class StringByteSerializer {
+    public static String readFrom(final ChannelBuffer data, final int length) {
+        byte[] stringBytes = new byte[length];
+        data.readBytes(stringBytes);
+        // find the first index of 0
+        int index = 0;
+        for (byte b : stringBytes) {
+            if (0 == b)
+                break;
+            ++index;
+        }
+        return new String(Arrays.copyOf(stringBytes, index), Charset.forName("ascii"));
+    }
+
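+    /**
+     * Writes {@code value} as ASCII into a fixed-size field of {@code length} bytes:
+     * shorter strings are padded with NUL bytes, longer strings are truncated to
+     * {@code length - 1} bytes so the field always ends with a NUL terminator.
+     */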
+    public static void writeTo(final ChannelBuffer data, final int length,
+            final String value) {
+        try {
+            byte[] name = value.getBytes("ASCII");
+            if (name.length < length) {
+                data.writeBytes(name);
+                for (int i = name.length; i < length; ++i) {
+                    data.writeByte((byte) 0);
+                }
+            } else {
+                data.writeBytes(name, 0, length - 1);
+                data.writeByte((byte) 0);
+            }
+        } catch (UnsupportedEncodingException e) {
+            throw new RuntimeException(e);
+        }
+
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/IPAddressTest.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/IPAddressTest.java
new file mode 100644
index 0000000..25fc943
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/IPAddressTest.java
@@ -0,0 +1,54 @@
+package org.projectfloodlight.openflow.types;
+
+import static org.junit.Assert.*;
+
+import org.junit.Test;
+
+/**
+ * Most tests live in IPv4AddressTest and IPv6AddressTest;
+ * only exception handling is tested here.
+ * @author gregor
+ *
+ */
+public class IPAddressTest {
+    @Test
+    public void testOfException() {
+        try {
+            IPAddress.of("Foobar");
+            fail("Should have thrown IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        try {
+            IPAddressWithMask.of("Foobar");
+            fail("Should have thrown IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        try {
+            IPAddress.of(null);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            IPAddressWithMask.of(null);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            IPAddress.of(null);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            IPAddressWithMask.of(null);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+    }
+
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/IPv4AddressTest.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/IPv4AddressTest.java
new file mode 100644
index 0000000..334ec0d
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/IPv4AddressTest.java
@@ -0,0 +1,340 @@
+package org.projectfloodlight.openflow.types;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.hamcrest.CoreMatchers;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.junit.Test;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+public class IPv4AddressTest {
+    byte[][] testAddresses = new byte[][] {
+            {0x01, 0x02, 0x03, 0x04 },
+            {127, 0, 0, 1},
+            {(byte) 192, (byte) 168, 0, 100 },
+            {(byte) 255, (byte) 255, (byte) 255, (byte) 255 }
+    };
+
+    String[] testStrings = {
+            "1.2.3.4",
+            "127.0.0.1",
+            "192.168.0.100",
+            "255.255.255.255"
+    };
+
+    int[] testInts = {
+            0x01020304,
+            0x7f000001,
+            (192 << 24) | (168 << 16) | 100,
+            0xffffffff
+    };
+
+    String[] invalidIPs = {
+            "",
+            ".",
+            "1.2..3.4",
+            "1.2.3.4.",
+            "257.11.225.1",
+            "256.11.225.1",
+            "-1.2.3.4",
+            "1.2.3.4.5",
+            "1.x.3.4",
+            "1.2x.3.4"
+    };
+
+    String[] ipsWithMask = {
+                            "1.2.3.4/24",
+                            "192.168.130.140/255.255.192.0",
+                            "127.0.0.1/8",
+                            "8.8.8.8",
+                            "8.8.8.8/32",
+                            "0.0.0.0/0",
+                            "192.168.130.140/255.0.255.0",
+                            "1.2.3.4/0.127.0.255"
+    };
+
+    boolean[] hasMask = {
+                         true,
+                         true,
+                         true,
+                         false,
+                         false,
+                         true,
+                         true,
+                         true
+    };
+
+    byte[][][] ipsWithMaskValues = {
+                             new byte[][] { new byte[] { (byte)0x01, (byte)0x02, (byte)0x03, (byte)0x04 }, new byte[] { (byte)0xFF, (byte)0xFF, (byte)0xFF, (byte)0x00 } },
+                             new byte[][] { new byte[] { (byte)0xC0, (byte)0xA8, (byte)0x82, (byte)0x8C }, new byte[] { (byte)0xFF, (byte)0xFF, (byte)0xC0, (byte)0x00 } },
+                             new byte[][] { new byte[] { (byte)0x7F, (byte)0x00, (byte)0x00, (byte)0x01 }, new byte[] { (byte)0xFF, (byte)0x00, (byte)0x00, (byte)0x00 } },
+                             new byte[][] { new byte[] { (byte)0x08, (byte)0x08, (byte)0x08, (byte)0x08 }, new byte[] { (byte)0xFF, (byte)0xFF, (byte)0xFF, (byte)0xFF } },
+                             new byte[][] { new byte[] { (byte)0x08, (byte)0x08, (byte)0x08, (byte)0x08 }, new byte[] { (byte)0xFF, (byte)0xFF, (byte)0xFF, (byte)0xFF } },
+                             new byte[][] { new byte[] { (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00 }, new byte[] { (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00 } },
+                             new byte[][] { new byte[] { (byte)0xC0, (byte)0xA8, (byte)0x82, (byte)0x8C }, new byte[] { (byte)0xFF, (byte)0x00, (byte)0xFF, (byte)0x00 } },
+                             new byte[][] { new byte[] { (byte)0x01, (byte)0x02, (byte)0x03, (byte)0x04 }, new byte[] { (byte)0x00, (byte)0x7F, (byte)0x00, (byte)0xFF } }
+    };
+
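+    // Expected CIDR prefix length for each entry in ipsWithMask;
+    // -1 marks masks that are not contiguous and have no CIDR representation.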
+    int[] ipsWithMaskLengths = {
+                                24,
+                                18,
+                                8,
+                                32,
+                                32,
+                                0,
+                                -1,
+                                -1
+    };
+
+    String[] invalidIpsWithMask = {
+                                   "asdf",
+                                   "1.2.3.4/33",
+                                   "1.2.3.4/34",
+                                   "1.2.3.4/-1",
+                                   "1.2.3.4/256.0.0.0",
+                                   "1.256.3.4/255.255.0.0",
+                                   "1.2.3.4/255.255.0.0.0",
+    };
+
+    @Test
+    public void testMaskedMatchesCidr() {
+        IPv4AddressWithMask slash28 = IPv4AddressWithMask.of("10.0.42.16/28");
+
+        String[] notContained = {"0.0.0.0", "11.0.42.16", "10.0.41.1", "10.0.42.0", "10.0.42.15",
+                                 "10.0.42.32", "255.255.255.255" };
+
+        for(String n: notContained) {
+            assertThat(String.format("slash 28 %s should not contain address %s",
+                                     slash28, n),
+                    slash28.matches(IPv4Address.of(n)), equalTo(false));
+        }
+        for(int i=16; i < 32; i++) {
+            IPv4Address c = IPv4Address.of(String.format("10.0.42.%d", i));
+            assertThat(String.format("slash 28 %s should contain address %s",
+                                     slash28, c),
+                       slash28.matches(c), equalTo(true));
+        }
+    }
+
+    @Test
+    public void testMaskedMatchesArbitrary() {
+        // irregular octet in the 3rd position of the bitmask (0x05): bit 0 must be
+        // set and bit 2 must be unset; all other bits are arbitrary
+        IPv4AddressWithMask slash28 = IPv4AddressWithMask.of("1.2.1.4/255.255.5.255");
+
+        String[] notContained = {"0.0.0.0", "1.2.3.5", "1.2.3.3",
+                                 "1.2.0.4", "1.2.2.4", "1.2.4.4", "1.2.5.4", "1.2.6.4", "1.2.7.4",
+                                 "1.2.8.4", "1.2.12.4", "1.2.13.4"
+                                 };
+        String[] contained = {"1.2.1.4", "1.2.3.4", "1.2.9.4", "1.2.11.4", "1.2.251.4",
+                };
+
+        for(String n: notContained) {
+            assertThat(String.format("slash 28 %s should not contain address %s",
+                                     slash28, n),
+                    slash28.matches(IPv4Address.of(n)), equalTo(false));
+        }
+        for(String c: contained) {
+            IPv4Address addr = IPv4Address.of(c);
+            assertThat(String.format("slash 28 %s should contain address %s",
+                                     slash28, addr),
+                       slash28.matches(addr), equalTo(true));
+        }
+
+    }
+
+
+    @Test
+    public void testConstants() {
+        byte[] zeros = { (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00 };
+        byte[] ones =  { (byte)0xFF, (byte)0xFF, (byte)0xFF, (byte)0xFF };
+        // Make sure class initialization and static assignment don't get
+        // messed up. Test everything twice to cover cached values.
+        assertTrue(IPv4Address.NONE.isCidrMask());
+        assertEquals(0, IPv4Address.NONE.asCidrMaskLength());
+        assertArrayEquals(zeros, IPv4Address.NONE.getBytes());
+        assertTrue(IPv4Address.NONE.isCidrMask());
+        assertEquals(0, IPv4Address.NONE.asCidrMaskLength());
+        assertArrayEquals(zeros, IPv4Address.NONE.getBytes());
+
+        assertTrue(IPv4Address.NO_MASK.isCidrMask());
+        assertEquals(32, IPv4Address.NO_MASK.asCidrMaskLength());
+        assertArrayEquals(ones, IPv4Address.NO_MASK.getBytes());
+        assertTrue(IPv4Address.NO_MASK.isCidrMask());
+        assertEquals(32, IPv4Address.NO_MASK.asCidrMaskLength());
+        assertArrayEquals(ones, IPv4Address.NO_MASK.getBytes());
+
+        assertTrue(IPv4Address.FULL_MASK.isCidrMask());
+        assertEquals(0, IPv4Address.FULL_MASK.asCidrMaskLength());
+        assertArrayEquals(zeros, IPv4Address.FULL_MASK.getBytes());
+        assertTrue(IPv4Address.FULL_MASK.isCidrMask());
+        assertEquals(0, IPv4Address.FULL_MASK.asCidrMaskLength());
+        assertArrayEquals(zeros, IPv4Address.FULL_MASK.getBytes());
+    }
+
+
+    @Test
+    public void testOfString() {
+        for(int i=0; i < testAddresses.length; i++ ) {
+            IPv4Address ip = IPv4Address.of(testStrings[i]);
+            assertEquals(testInts[i], ip.getInt());
+            assertArrayEquals(testAddresses[i], ip.getBytes());
+            assertEquals(testStrings[i], ip.toString());
+        }
+    }
+
+    @Test
+    public void testOfByteArray() {
+        for(int i=0; i < testAddresses.length; i++ ) {
+            IPv4Address ip = IPv4Address.of(testAddresses[i]);
+            assertEquals(testInts[i], ip.getInt());
+            assertArrayEquals(testAddresses[i], ip.getBytes());
+            assertEquals(testStrings[i], ip.toString());
+        }
+    }
+
+    @Test
+    public void testReadFrom() throws OFParseError {
+        for(int i=0; i < testAddresses.length; i++ ) {
+            IPv4Address ip = IPv4Address.read4Bytes(ChannelBuffers.copiedBuffer(testAddresses[i]));
+            assertEquals(testInts[i], ip.getInt());
+            assertArrayEquals(testAddresses[i], ip.getBytes());
+            assertEquals(testStrings[i], ip.toString());
+        }
+    }
+
+
+    @Test
+    public void testInvalidIPs() throws OFParseError {
+        for(String invalid : invalidIPs) {
+            try {
+                IPv4Address.of(invalid);
+                fail("Invalid IP "+invalid+ " should have raised IllegalArgumentException");
+            } catch(IllegalArgumentException e) {
+                // ok
+            }
+        }
+    }
+
+    @Test
+    public void testOfMasked() throws OFParseError {
+        for (int i = 0; i < ipsWithMask.length; i++) {
+            IPv4AddressWithMask value = IPv4AddressWithMask.of(ipsWithMask[i]);
+            if (!hasMask[i]) {
+                IPv4Address ip = value.getValue();
+                assertArrayEquals(ipsWithMaskValues[i][0], ip.getBytes());
+            }
+            IPv4Address mask = value.getMask();
+            if (ipsWithMaskLengths[i] == -1) {
+                assertFalse(mask.isCidrMask());
+                try {
+                    mask.asCidrMaskLength();
+                    fail("Expected IllegalStateException not thrown");
+                } catch(IllegalStateException e) {
+                    //expected
+                }
+            } else {
+                assertTrue(mask.isCidrMask());
+                assertEquals(ipsWithMaskLengths[i], mask.asCidrMaskLength());
+            }
+            assertArrayEquals(ipsWithMaskValues[i][1], mask.getBytes());
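+            // the parsed value must already have the mask applied:
+            // value.getValue() == (address bytes AND mask bytes)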
+            byte[] ipBytes = new byte[4];
+            System.arraycopy(ipsWithMaskValues[i][0], 0, ipBytes, 0, 4);
+            assertEquals(ipBytes.length, value.getValue().getBytes().length);
+            for (int j = 0; j < ipBytes.length; j++) {
+                ipBytes[j] &= ipsWithMaskValues[i][1][j];
+            }
+
+            assertArrayEquals(ipBytes, value.getValue().getBytes());
+            assertThat(String.format("Byte comparison for mask of %s (%s)", ipsWithMask[i], value),
+                    value.getMask().getBytes(), CoreMatchers.equalTo(ipsWithMaskValues[i][1]));
+        }
+    }
+
+    @Test
+    public void testOfMaskedInvalid() throws Exception {
+        for(String invalid : invalidIpsWithMask) {
+            try {
+                IPv4Address.of(invalid);
+                fail("Invalid IP "+invalid+ " should have raised IllegalArgumentException");
+            } catch(IllegalArgumentException e) {
+                // ok
+            }
+        }
+    }
+
+    @Test
+    public void testSuperclass() throws Exception {
+        for(String ipString: testStrings) {
+            IPAddress<?> superIp = IPAddress.of(ipString);
+            assertEquals(IPVersion.IPv4, superIp.getIpVersion());
+            assertEquals(IPv4Address.of(ipString), superIp);
+        }
+
+        for(String ipMaskedString: ipsWithMask) {
+            IPAddressWithMask<?> superIp = IPAddressWithMask.of(ipMaskedString);
+            assertEquals(IPVersion.IPv4, superIp.getIpVersion());
+            assertEquals(IPv4AddressWithMask.of(ipMaskedString), superIp);
+        }
+    }
+
+    @Test
+    public void testOfExceptions() {
+        // We check that the message of a caught NPE is set to a useful string
+        // as a hacky way of verifying that the NPE was thrown by us rather
+        // than generated by the JVM for a null access.
+        try {
+            String s = null;
+            IPv4Address.of(s);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            byte[] b = null;
+            IPv4Address.of(b);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            byte[] b = new byte[3];
+            IPv4Address.of(b);
+            fail("Should have thrown IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        try {
+            byte[] b = new byte[5];
+            IPv4Address.of(b);
+            fail("Should have thrown IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        try {
+            IPv4AddressWithMask.of(null);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            IPv4AddressWithMask.of(IPv4Address.of("1.2.3.4"), null);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            IPv4AddressWithMask.of(null, IPv4Address.of("255.0.0.0"));
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/IPv6AddressTest.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/IPv6AddressTest.java
new file mode 100644
index 0000000..c521292
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/IPv6AddressTest.java
@@ -0,0 +1,286 @@
+package org.projectfloodlight.openflow.types;
+
+import static org.junit.Assert.*;
+
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+import org.hamcrest.CoreMatchers;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.junit.Test;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+import com.google.common.io.BaseEncoding;
+
+public class IPv6AddressTest {
+
+    String[] testStrings = {
+            "::",
+            "::1",
+            "ffe0::",
+            "1:2:3:4:5:6:7:8"
+    };
+
+
+    private final BaseEncoding hex = BaseEncoding.base16().omitPadding().lowerCase();
+
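+    // Test fixture: an input string plus the expected mask bytes and CIDR prefix
+    // length; an expectedMaskLength of -1 means the mask is not a contiguous CIDR mask.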
+    private class WithMaskTaskCase {
+        final String input;
+        boolean hasMask;
+        int expectedMaskLength = 128;
+        byte[] expectedMask = hex.decode("ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff".replaceAll(" ", ""));
+
+        public WithMaskTaskCase(String input) {
+            super();
+            this.input = input;
+        }
+
+        public WithMaskTaskCase maskHex(String string) {
+            string = string.replaceAll(" ", "");
+            this.hasMask = true;
+            expectedMask = hex.decode(string);
+            return this;
+        }
+
+        public WithMaskTaskCase expectedMaskLength(int expectedLength) {
+            this.expectedMaskLength = expectedLength;
+            return this;
+        }
+
+    }
+
+    WithMaskTaskCase[] withMasks = new WithMaskTaskCase[] {
+            new WithMaskTaskCase("1::1/80")
+                .maskHex("ff ff ff ff ff ff ff ff ff ff 00 00 00 00 00 00")
+                .expectedMaskLength(80),
+
+            new WithMaskTaskCase("ffff:ffee:1::/ff00:ff00:ff00:ff00::")
+                .maskHex("ff 00 ff 00 ff 00 ff 00 00 00 00 00 00 00 00 00")
+                .expectedMaskLength(-1),
+            new WithMaskTaskCase("1:2:3:4:5:6:7:8/1::ff00:ff00")
+                .maskHex("00 01 00 00 00 00 00 00 00 00 00 00 ff 00 ff 00")
+                .expectedMaskLength(-1),
+            new WithMaskTaskCase("1:2:3:4:5:6:7:8/::ff00:ff00")
+                .maskHex("00 00 00 00 00 00 00 00 00 00 00 00 ff 00 ff 00")
+                .expectedMaskLength(-1),
+            new WithMaskTaskCase("1:2:3:4:5:6:7:8/ffff:ffff:ffff:ffff:ffff::ff00:ff00")
+                .maskHex("ff ff ff ff ff ff ff ff ff ff 00 00 ff 00 ff 00")
+                .expectedMaskLength(-1),
+            new WithMaskTaskCase("8:8:8:8:8:8:8:8"),
+            new WithMaskTaskCase("8:8:8:8:8:8:8:8"),
+            new WithMaskTaskCase("1:2:3:4:5:6:7:8/128"),
+            new WithMaskTaskCase("::/0")
+                .maskHex("00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00")
+                .expectedMaskLength(0),
+    };
+
+    @Test
+    public void testConstants() {
+        byte[] zeros = { (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00,
+                         (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00,
+                         (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00,
+                         (byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00 };
+        byte[] ones = { (byte)0xFF, (byte)0xFF, (byte)0xFF, (byte)0xFF,
+                        (byte)0xFF, (byte)0xFF, (byte)0xFF, (byte)0xFF,
+                        (byte)0xFF, (byte)0xFF, (byte)0xFF, (byte)0xFF,
+                        (byte)0xFF, (byte)0xFF, (byte)0xFF, (byte)0xFF };
+        // Make sure class initialization and static assignment don't get
+        // messed up. Test everything twice to cover cached values.
+        assertTrue(IPv6Address.NONE.isCidrMask());
+        assertEquals(0, IPv6Address.NONE.asCidrMaskLength());
+        assertArrayEquals(zeros, IPv6Address.NONE.getBytes());
+        assertTrue(IPv6Address.NONE.isCidrMask());
+        assertEquals(0, IPv6Address.NONE.asCidrMaskLength());
+        assertArrayEquals(zeros, IPv6Address.NONE.getBytes());
+
+        assertTrue(IPv6Address.NO_MASK.isCidrMask());
+        assertEquals(128, IPv6Address.NO_MASK.asCidrMaskLength());
+        assertArrayEquals(ones, IPv6Address.NO_MASK.getBytes());
+        assertTrue(IPv6Address.NO_MASK.isCidrMask());
+        assertEquals(128, IPv6Address.NO_MASK.asCidrMaskLength());
+        assertArrayEquals(ones, IPv6Address.NO_MASK.getBytes());
+
+        assertTrue(IPv6Address.FULL_MASK.isCidrMask());
+        assertEquals(0, IPv6Address.FULL_MASK.asCidrMaskLength());
+        assertArrayEquals(zeros, IPv6Address.FULL_MASK.getBytes());
+        assertTrue(IPv6Address.FULL_MASK.isCidrMask());
+        assertEquals(0, IPv6Address.FULL_MASK.asCidrMaskLength());
+        assertArrayEquals(zeros, IPv6Address.FULL_MASK.getBytes());
+    }
+
+    @Test
+    public void testMasked() throws UnknownHostException {
+        for(WithMaskTaskCase w: withMasks) {
+            IPv6AddressWithMask value = IPv6AddressWithMask.of(w.input);
+            if (!w.hasMask) {
+                IPv6Address ip = value.getValue();
+                InetAddress inetAddress = InetAddress.getByName(w.input.split("/")[0]);
+
+                assertArrayEquals(ip.getBytes(), inetAddress.getAddress());
+                assertEquals(w.input.split("/")[0], ip.toString());
+            }
+            InetAddress inetAddress = InetAddress.getByName(w.input.split("/")[0]);
+
+            if (w.expectedMaskLength == -1) {
+                assertFalse(value.getMask().isCidrMask());
+                try {
+                    value.getMask().asCidrMaskLength();
+                    fail("Expected IllegalStateException not thrown");
+                } catch(IllegalStateException e) {
+                    //expected
+                }
+            } else {
+                assertTrue(value.getMask().isCidrMask());
+                assertEquals("Input " + w.input, w.expectedMaskLength,
+                             value.getMask().asCidrMaskLength());
+            }
+
+            byte[] address = inetAddress.getAddress();
+            assertEquals(address.length, value.getValue().getBytes().length);
+
+            for (int j = 0; j < address.length; j++) {
+                address[j] &= w.expectedMask[j];
+            }
+
+            assertThat("Address bytes for input " + w.input + ", value=" + value, value.getValue().getBytes(), CoreMatchers.equalTo(address));
+            assertThat("mask check for input " + w.input + ", value=" + value, value.getMask().getBytes(), CoreMatchers.equalTo(w.expectedMask));
+        }
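+        // Round trip: every prefix length from 0 to 128 must be reported back
+        // as the CIDR mask length of the parsed mask.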
+        for (int i = 0; i <= 128; i++) {
+            String ipString = String.format("8001:2::1/%d", i);
+            IPv6AddressWithMask value = IPv6AddressWithMask.of(ipString);
+            assertEquals("Input " + ipString, i, value.getMask().asCidrMaskLength());
+        }
+    }
+
+
+    @Test
+    public void testOfString() throws UnknownHostException {
+        for(int i=0; i < testStrings.length; i++ ) {
+            IPv6Address ip = IPv6Address.of(testStrings[i]);
+            InetAddress inetAddress = InetAddress.getByName(testStrings[i]);
+
+            assertArrayEquals(ip.getBytes(), inetAddress.getAddress());
+            assertEquals(testStrings[i], ip.toString());
+        }
+    }
+
+    @Test
+    public void testOfByteArray() throws UnknownHostException {
+        for(int i=0; i < testStrings.length; i++ ) {
+            byte[] bytes = Inet6Address.getByName(testStrings[i]).getAddress();
+            IPv6Address ip = IPv6Address.of(bytes);
+            assertEquals(testStrings[i], ip.toString());
+            assertArrayEquals(bytes, ip.getBytes());
+        }
+    }
+
+    @Test
+    public void testReadFrom() throws OFParseError, UnknownHostException {
+        for(int i=0; i < testStrings.length; i++ ) {
+            byte[] bytes = Inet6Address.getByName(testStrings[i]).getAddress();
+            IPv6Address ip = IPv6Address.read16Bytes(ChannelBuffers.copiedBuffer(bytes));
+            assertEquals(testStrings[i], ip.toString());
+            assertArrayEquals(bytes, ip.getBytes());
+        }
+    }
+
+    String[] invalidIPs = {
+            "",
+            ":",
+            "1:2:3:4:5:6:7:8:9",
+            "1:2:3:4:5:6:7:8:",
+            "1:2:3:4:5:6:7:8g",
+            "1:2:3:",
+            "12345::",
+            "1::3::8",
+            "::3::"
+    };
+
+    @Test
+    public void testInvalidIPs() throws OFParseError {
+        for(String invalid : invalidIPs) {
+            try {
+                IPv6Address.of(invalid);
+                fail("Invalid IP "+invalid+ " should have raised IllegalArgumentException");
+            } catch(IllegalArgumentException e) {
+                // ok
+            }
+        }
+    }
+
+    @Test
+    public void testZeroCompression() throws OFParseError {
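+        // The first toString flag enables "::" compression of the longest zero run;
+        // the second pads every group to four hex digits.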
+        assertEquals("::", IPv6Address.of("::").toString(true, false));
+        assertEquals("0:0:0:0:0:0:0:0", IPv6Address.of("::").toString(false, false));
+        assertEquals("0000:0000:0000:0000:0000:0000:0000:0000", IPv6Address.of("::").toString(false, true));
+        assertEquals("1::4:5:6:0:8", IPv6Address.of("1:0:0:4:5:6:0:8").toString(true, false));
+        assertEquals("1:0:0:4::8", IPv6Address.of("1:0:0:4:0:0:0:8").toString(true, false));
+    }
+
+    @Test
+    public void testSuperclass() throws Exception {
+        for(String ipString: testStrings) {
+            IPAddress<?> superIp = IPAddress.of(ipString);
+            assertEquals(IPVersion.IPv6, superIp.getIpVersion());
+            assertEquals(IPv6Address.of(ipString), superIp);
+        }
+
+        for(WithMaskTaskCase w: withMasks) {
+            String ipMaskedString = w.input;
+            IPAddressWithMask<?> superIp = IPAddressWithMask.of(ipMaskedString);
+            assertEquals(IPVersion.IPv6, superIp.getIpVersion());
+            assertEquals(IPv6AddressWithMask.of(ipMaskedString), superIp);
+        }
+    }
+
+    @Test
+    public void testOfExceptions() throws Exception {
+        try {
+            IPv6AddressWithMask.of(null);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            String s = null;
+            IPv6Address.of(s);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            byte[] b = null;
+            IPv6Address.of(b);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            byte[] b = new byte[7];
+            IPv6Address.of(b);
+            fail("Should have thrown IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        try {
+            byte[] b = new byte[9];
+            IPv6Address.of(b);
+            fail("Should have thrown IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        try {
+            IPv6AddressWithMask.of(IPv6Address.of("1::"), null);
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+        try {
+            IPv6AddressWithMask.of(null, IPv6Address.of("255::"));
+            fail("Should have thrown NullPointerException");
+        } catch (NullPointerException e) {
+            assertNotNull(e.getMessage());
+        }
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/MacAddressTest.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/MacAddressTest.java
new file mode 100644
index 0000000..a13fdd4
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/MacAddressTest.java
@@ -0,0 +1,154 @@
+package org.projectfloodlight.openflow.types;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Arrays;
+
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.junit.Test;
+import org.projectfloodlight.openflow.exceptions.OFParseError;
+
+public class MacAddressTest {
+    byte[][] testAddresses = new byte[][] {
+            {0x01, 0x02, 0x03, 0x04, 0x05, 0x06 },
+            {(byte) 0x80, 0x0, 0x0, 0x0, 0x0, 0x01},
+            {(byte) 255, (byte) 255, (byte) 255, (byte) 255, (byte) 255, (byte) 255 }
+    };
+
+    String[] testStrings = {
+            "01:02:03:04:05:06",
+            "80:00:00:00:00:01",
+            "ff:ff:ff:ff:ff:ff"
+    };
+
+    long[] testInts = {
+            0x00010203040506L,
+            0x00800000000001L,
+            0x00ffffffffffffL
+    };
+
+    String[] invalidMacStrings = {
+            "",
+            "1.2.3.4",
+            "0T:00:01:02:03:04",
+            "00:01:02:03:04:05:06",
+            "00:ff:ef:12:12:ff:",
+            "00:fff:ef:12:12:ff",
+            "01:02:03:04:05;06",
+            "0:1:2:3:4:5:6",
+            "01:02:03:04"
+    };
+
+    byte[][] invalidMacBytes = {
+            new byte[]{0x01, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
+            new byte[]{0x01, 0x01, 0x02, 0x03, 0x04}
+    };
+
+    @Test
+    public void testOfString() {
+        for(int i=0; i < testAddresses.length; i++ ) {
+            MacAddress ip = MacAddress.of(testStrings[i]);
+            assertEquals(testInts[i], ip.getLong());
+            assertArrayEquals(testAddresses[i], ip.getBytes());
+            assertEquals(testStrings[i], ip.toString());
+        }
+    }
+
+    @Test
+    public void testOfByteArray() {
+        for(int i=0; i < testAddresses.length; i++ ) {
+            MacAddress ip = MacAddress.of(testAddresses[i]);
+            assertEquals("error checking long representation of "+Arrays.toString(testAddresses[i]) + "(should be "+Long.toHexString(testInts[i]) +")", testInts[i],  ip.getLong());
+            assertArrayEquals(testAddresses[i], ip.getBytes());
+            assertEquals(testStrings[i], ip.toString());
+        }
+    }
+
+    @Test
+    public void testReadFrom() throws OFParseError {
+        for(int i=0; i < testAddresses.length; i++ ) {
+            MacAddress ip = MacAddress.read6Bytes(ChannelBuffers.copiedBuffer(testAddresses[i]));
+            assertEquals(testInts[i], ip.getLong());
+            assertArrayEquals(testAddresses[i], ip.getBytes());
+            assertEquals(testStrings[i], ip.toString());
+        }
+    }
+
+
+    @Test
+    public void testInvalidMacStrings() throws OFParseError {
+        for(String invalid : invalidMacStrings) {
+            try {
+                MacAddress.of(invalid);
+                fail("Invalid MAC address "+invalid+ " should have raised IllegalArgumentException");
+            } catch(IllegalArgumentException e) {
+                // ok
+            }
+        }
+    }
+
+    @Test
+    public void testInvalidMacBytes() throws OFParseError {
+        for(byte[] invalid : invalidMacBytes) {
+            try {
+                MacAddress.of(invalid);
+                fail("Invalid MAC address bytes "+ Arrays.toString(invalid) + " should have raised IllegalArgumentException");
+            } catch(IllegalArgumentException e) {
+                // ok
+            }
+        }
+    }
+
+    //  Test data is imported from org.projectfloodlight.packet.EthernetTest
+    @Test
+    public void testToLong() {
+        assertEquals(
+                281474976710655L,
+                MacAddress.of(new byte[]{(byte) 0xff, (byte) 0xff,
+                        (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff}).getLong());
+
+        assertEquals(
+                1103823438081L,
+                MacAddress.of(new byte[] { (byte) 0x01, (byte) 0x01,
+                        (byte) 0x01, (byte) 0x01, (byte) 0x01, (byte) 0x01 }).getLong());
+
+        assertEquals(
+                141289400074368L,
+                MacAddress.of(new byte[] { (byte) 0x80, (byte) 0x80,
+                        (byte) 0x80, (byte) 0x80, (byte) 0x80, (byte) 0x80 }).getLong());
+
+    }
+
+    @Test
+    public void testIsBroadcast() {
+        assertTrue(MacAddress.of("FF:FF:FF:FF:FF:FF").isBroadcast());
+        assertTrue(MacAddress.of(-1).isBroadcast());
+        assertTrue(MacAddress.of(0x05FFFFFFFFFFFFL).isBroadcast());
+        assertFalse(MacAddress.of("11:22:33:44:55:66").isBroadcast());
+    }
+
+    @Test
+    public void testIsMulticast() {
+        assertTrue(MacAddress.of("01:80:C2:00:00:00").isMulticast());
+        assertFalse(MacAddress.of("00:80:C2:00:00:00").isMulticast());
+        assertFalse(MacAddress.of("FE:80:C2:00:00:00").isMulticast());
+        assertFalse(MacAddress.of(-1).isMulticast());
+        assertFalse(MacAddress.of(0x05FFFFFFFFFFFFL).isMulticast());
+        assertFalse(MacAddress.of("FF:FF:FF:FF:FF:FF").isMulticast());
+    }
+
+    @Test
+    public void testIsLLDPAddress() {
+        assertTrue(MacAddress.of("01:80:C2:00:00:00").isLLDPAddress());
+        assertTrue(MacAddress.of("01:80:C2:00:00:0f").isLLDPAddress());
+        assertFalse(MacAddress.of("01:80:C2:00:00:50").isLLDPAddress());
+        assertFalse(MacAddress.of("01:80:C2:00:10:00").isLLDPAddress());
+        assertFalse(MacAddress.of("01:80:C2:40:00:01").isLLDPAddress());
+        assertFalse(MacAddress.of("00:80:C2:f0:00:00").isLLDPAddress());
+        assertFalse(MacAddress.of("FE:80:C2:00:00:00").isLLDPAddress());
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/OFPortBitMapTest.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/OFPortBitMapTest.java
new file mode 100644
index 0000000..4db84f1
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/OFPortBitMapTest.java
@@ -0,0 +1,71 @@
+package org.projectfloodlight.openflow.types;
+
+import static org.hamcrest.Matchers.contains;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertThat;
+import junit.framework.TestCase;
+
+import org.junit.Test;
+
+public class OFPortBitMapTest extends TestCase {
+    @Test
+    public void testCreateAndIterate() {
+        OFPortBitMap map = OFPortBitMap.ofPorts(OFPort.of(1), OFPort.of(2), OFPort.of(5));
+
+        assertThat(map.getOnPorts(), contains(OFPort.of(1), OFPort.of(2), OFPort.of(5)));
+    }
+
+    @Test
+    public void testOFBitMap() {
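+        // In the raw 128-bit mask a 0 bit marks a port that is on:
+        // 0x...FFD9 has bits 1, 2 and 5 cleared.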
+        OFBitMask128 bitmap = OFBitMask128.of(0xFFFF_FFFF_FFFF_FFFFL, 0xFFFF_FFFF_FFFF_FFD9L);
+
+        OFPortBitMap map = OFPortBitMap.of(bitmap);
+
+        assertThat(map.getOnPorts(), contains(OFPort.of(1), OFPort.of(2), OFPort.of(5)));
+    }
+
+    @Test
+    public void testOFPortBitMap() {
+        Boolean[] on = new Boolean[127];
+        for (int i = 0; i < 127; i++) {
+            on[i] = false;
+        }
+
+        OFPortBitMap.Builder builder = new OFPortBitMap.Builder();
+
+        for (int i = 0; i < 127; i += 3) {
+            OFPort p = OFPort.of(i);
+            builder.set(p);
+            on[p.getPortNumber()] = true;
+        }
+
+        // Test that all ports that were added are actually on, and all other ports are off
+        OFPortBitMap portmap = builder.build();
+        //System.out.println(portmap);
+        Boolean[] actual = new Boolean[127];
+        for (int i = 0; i < 127; i++) {
+            actual[i] = false;
+        }
+        for (int i = 0; i < 127; i++) {
+            actual[i] = portmap.isOn(OFPort.of(i));
+        }
+        assertArrayEquals(on, actual);
+
+        // Turn some ports off
+        for (int i = 0; i < 127; i += 7) {
+            on[i] = false;
+            builder.unset(OFPort.of(i));
+        }
+
+        // Test again
+        portmap = builder.build();
+        actual = new Boolean[127];
+        for (int i = 0; i < 127; i++) {
+            actual[i] = false;
+        }
+        for (int i = 0; i < 127; i++) {
+            actual[i] = portmap.isOn(OFPort.of(i));
+        }
+        assertArrayEquals(on, actual);
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/U64Test.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/U64Test.java
new file mode 100644
index 0000000..e45e8a0
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/openflow/types/U64Test.java
@@ -0,0 +1,26 @@
+package org.projectfloodlight.openflow.types;
+
+import static org.junit.Assert.assertEquals;
+
+import java.math.BigInteger;
+
+import org.junit.Test;
+
+public class U64Test {
+
+    @Test
+    public void testPositiveRaws() {
+        for(long positive: new long[] { 0, 1, 100, Long.MAX_VALUE }) {
+            assertEquals(positive, U64.ofRaw(positive).getValue());
+            assertEquals(BigInteger.valueOf(positive), U64.ofRaw(positive).getBigInteger());
+        }
+    }
+
+    @Test
+    public void testNegativeRaws() {
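+        // the raw bit pattern of -1 is the unsigned value 2^64 - 1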
+        long minus_1 = 0xFFffFFffFFffFFffL;
+        assertEquals(minus_1, U64.ofRaw(minus_1).getValue());
+        assertEquals(new BigInteger("FFffFFffFFffFFff", 16),  U64.ofRaw(minus_1).getBigInteger());
+        assertEquals(new BigInteger("18446744073709551615"),  U64.ofRaw(minus_1).getBigInteger());
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/OFOxmListTest.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/OFOxmListTest.java
new file mode 100644
index 0000000..39e8c0c
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/OFOxmListTest.java
@@ -0,0 +1,41 @@
+package org.projectfloodlight.protocol;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+
+import org.hamcrest.CoreMatchers;
+import org.junit.Before;
+import org.junit.Test;
+import org.projectfloodlight.openflow.protocol.OFFactories;
+import org.projectfloodlight.openflow.protocol.OFOxmList;
+import org.projectfloodlight.openflow.protocol.OFVersion;
+import org.projectfloodlight.openflow.protocol.match.MatchField;
+import org.projectfloodlight.openflow.protocol.oxm.OFOxmIpv6DstMasked;
+import org.projectfloodlight.openflow.protocol.oxm.OFOxmIpv6SrcMasked;
+import org.projectfloodlight.openflow.protocol.oxm.OFOxms;
+import org.projectfloodlight.openflow.types.IPv6AddressWithMask;
+
+public class OFOxmListTest {
+    private OFOxms oxms;
+
+    @Before
+    public void setup() {
+        oxms = OFFactories.getFactory(OFVersion.OF_13).oxms();
+    }
+
+    @Test
+    public void testCanonicalize() {
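+        // Canonicalization drops a fully wildcarded OXM (all-zero mask) and
+        // collapses an all-ones mask into the plain, unmasked OXM.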
+        OFOxmList.Builder builder = new OFOxmList.Builder();
+        IPv6AddressWithMask fullMasked = IPv6AddressWithMask.of("::/0");
+        OFOxmIpv6DstMasked  fullMaskedOxm = oxms.ipv6DstMasked(fullMasked.getValue(), fullMasked.getMask());
+        builder.set(fullMaskedOxm);
+
+        IPv6AddressWithMask address= IPv6AddressWithMask.of("1:2:3:4:5:6::8");
+        OFOxmIpv6SrcMasked  addressSrcOxm = oxms.ipv6SrcMasked(address.getValue(), address.getMask());
+        builder.set(addressSrcOxm);
+
+        OFOxmList list = builder.build();
+        assertThat(list.get(MatchField.IPV6_DST), CoreMatchers.nullValue());
+        assertFalse(list.get(MatchField.IPV6_SRC).isMasked());
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/OFOxmTest.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/OFOxmTest.java
new file mode 100644
index 0000000..8482886
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/OFOxmTest.java
@@ -0,0 +1,62 @@
+package org.projectfloodlight.protocol;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+import org.hamcrest.CoreMatchers;
+import org.junit.Before;
+import org.junit.Test;
+import org.projectfloodlight.openflow.protocol.OFFactories;
+import org.projectfloodlight.openflow.protocol.OFVersion;
+import org.projectfloodlight.openflow.protocol.oxm.OFOxm;
+import org.projectfloodlight.openflow.protocol.oxm.OFOxmIpv4Src;
+import org.projectfloodlight.openflow.protocol.oxm.OFOxmIpv4SrcMasked;
+import org.projectfloodlight.openflow.protocol.oxm.OFOxms;
+import org.projectfloodlight.openflow.types.IPv4Address;
+import org.projectfloodlight.openflow.types.IPv4AddressWithMask;
+
+public class OFOxmTest {
+    private OFOxms oxms;
+
+    @Before
+    public void setup() {
+        oxms = OFFactories.getFactory(OFVersion.OF_13).oxms();
+    }
+
+    @Test
+    public void testGetCanonicalFullMask() {
+        IPv4AddressWithMask empty = IPv4AddressWithMask.of("0.0.0.0/0");
+        assertEquals(IPv4Address.FULL_MASK, empty.getMask());
+        OFOxmIpv4SrcMasked ipv4SrcMasked = oxms.ipv4SrcMasked(empty.getValue(), empty.getMask());
+        // canonicalize should remove /0
+        assertNull(ipv4SrcMasked.getCanonical());
+    }
+
+    @Test
+    public void testGetCanonicalNoMask() {
+        IPv4AddressWithMask fullIp = IPv4AddressWithMask.of("1.2.3.4/32");
+        assertEquals(IPv4Address.NO_MASK, fullIp.getMask());
+        OFOxmIpv4SrcMasked ipv4SrcMasked = oxms.ipv4SrcMasked(fullIp.getValue(), fullIp.getMask());
+        assertTrue(ipv4SrcMasked.isMasked());
+        assertEquals(IPv4Address.NO_MASK, ipv4SrcMasked.getMask());
+
+        // canonicalize should convert the masked oxm to the non-masked one
+        OFOxm<IPv4Address> canonical = ipv4SrcMasked.getCanonical();
+        assertThat(canonical, CoreMatchers.instanceOf(OFOxmIpv4Src.class));
+        assertFalse(canonical.isMasked());
+    }
+
+    @Test
+    public void testGetCanonicalNormalMask() {
+        IPv4AddressWithMask ip = IPv4AddressWithMask.of("1.2.3.0/24");
+        OFOxmIpv4SrcMasked ipv4SrcMasked = oxms.ipv4SrcMasked(ip.getValue(), ip.getMask());
+        assertTrue(ipv4SrcMasked.isMasked());
+
+        // a partial mask is already canonical: getCanonical() returns the oxm unchanged
+        OFOxm<IPv4Address> canonical = ipv4SrcMasked.getCanonical();
+        assertEquals(ipv4SrcMasked, canonical);
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/match/MatchFieldIteration10Test.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/match/MatchFieldIteration10Test.java
new file mode 100644
index 0000000..c6f4471
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/match/MatchFieldIteration10Test.java
@@ -0,0 +1,10 @@
+package org.projectfloodlight.protocol.match;
+
+import org.projectfloodlight.openflow.protocol.OFFactories;
+import org.projectfloodlight.openflow.protocol.OFVersion;
+
+public class MatchFieldIteration10Test extends MatchFieldIterationBase {
+    public MatchFieldIteration10Test() {
+        super(OFFactories.getFactory(OFVersion.OF_10));
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/match/MatchFieldIteration13Test.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/match/MatchFieldIteration13Test.java
new file mode 100644
index 0000000..b654a53
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/match/MatchFieldIteration13Test.java
@@ -0,0 +1,10 @@
+package org.projectfloodlight.protocol.match;
+
+import org.projectfloodlight.openflow.protocol.OFFactories;
+import org.projectfloodlight.openflow.protocol.OFVersion;
+
+public class MatchFieldIteration13Test extends MatchFieldIterationBase {
+    public MatchFieldIteration13Test() {
+        super(OFFactories.getFactory(OFVersion.OF_13));
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/match/MatchFieldIterationBase.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/match/MatchFieldIterationBase.java
new file mode 100644
index 0000000..9c72e37
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/protocol/match/MatchFieldIterationBase.java
@@ -0,0 +1,249 @@
+package org.projectfloodlight.protocol.match;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+import java.util.Iterator;
+
+import org.junit.Test;
+import org.projectfloodlight.openflow.protocol.OFFactory;
+import org.projectfloodlight.openflow.protocol.OFVersion;
+import org.projectfloodlight.openflow.protocol.match.Match;
+import org.projectfloodlight.openflow.protocol.match.MatchField;
+import org.projectfloodlight.openflow.protocol.match.MatchFields;
+import org.projectfloodlight.openflow.types.ArpOpcode;
+import org.projectfloodlight.openflow.types.EthType;
+import org.projectfloodlight.openflow.types.IPv4Address;
+import org.projectfloodlight.openflow.types.IpProtocol;
+import org.projectfloodlight.openflow.types.MacAddress;
+import org.projectfloodlight.openflow.types.Masked;
+import org.projectfloodlight.openflow.types.OFPort;
+import org.projectfloodlight.openflow.types.TransportPort;
+
+import com.google.common.collect.Iterables;
+
+public class MatchFieldIterationBase {
+
+    private OFFactory factory;
+
+    protected MatchFieldIterationBase(OFFactory factory) {
+        this.factory = factory;
+    }
+    
+    @Test
+    public void iterateEmptyMatch() {
+        Match match = factory.buildMatch().build();
+        Iterator<MatchField<?>> iter = match.getMatchFields().iterator();
+        assertThat(iter.hasNext(), is(false));
+    }
+    
+    @Test
+    public void iterateSingleExactMatchField() {
+        OFPort port5 = OFPort.of(5);
+        Match match = factory.buildMatch()
+                .setExact(MatchField.IN_PORT, port5)
+                .build();
+        Iterator<MatchField<?>> iter = match.getMatchFields().iterator();
+        assertThat(iter.hasNext(), is(true));
+        MatchField<?> matchField = iter.next();
+        assertThat(matchField.id, is(MatchFields.IN_PORT));
+        assertThat(match.isExact(matchField), is(true));
+        @SuppressWarnings("unchecked")
+        MatchField<OFPort> portMatchField = (MatchField<OFPort>) matchField;
+        OFPort port = match.get(portMatchField);
+        assertThat(port, is(port5));
+        assertThat(iter.hasNext(), is(false));
+    }
+    
+    @SuppressWarnings("unchecked")
+    @Test
+    public void iterateExactMatchFields() {
+        OFPort port5 = OFPort.of(5);
+        MacAddress macSrc = MacAddress.of("00:01:02:03:04:05");
+        MacAddress macDst = MacAddress.of("01:01:02:02:03:03");
+        IPv4Address ipSrc = IPv4Address.of("10.192.20.1");
+        IPv4Address ipDst = IPv4Address.of("10.192.20.2");
+        TransportPort tcpSrc = TransportPort.of(100);
+        TransportPort tcpDst = TransportPort.of(200);
+        Match match = factory.buildMatch()
+                .setExact(MatchField.IN_PORT, port5)
+                .setExact(MatchField.ETH_TYPE, EthType.IPv4)
+                .setExact(MatchField.ETH_SRC, macSrc)
+                .setExact(MatchField.ETH_DST, macDst)
+                .setExact(MatchField.IP_PROTO, IpProtocol.TCP)
+                .setExact(MatchField.IPV4_SRC, ipSrc)
+                .setExact(MatchField.IPV4_DST, ipDst)
+                .setExact(MatchField.TCP_SRC, tcpSrc)
+                .setExact(MatchField.TCP_DST, tcpDst)
+                .build();
+        assertThat(Iterables.size(match.getMatchFields()), is(9));
+        for (MatchField<?> matchField: match.getMatchFields()) {
+            switch (matchField.id) {
+            case IN_PORT:
+                OFPort port = match.get((MatchField<OFPort>) matchField);
+                assertThat(port, is(port5));
+                break;
+            case ETH_TYPE:
+                EthType ethType = match.get((MatchField<EthType>) matchField);
+                assertThat(ethType, is(EthType.IPv4));
+                break;
+            case ETH_SRC:
+                MacAddress mac = match.get((MatchField<MacAddress>) matchField);
+                assertThat(mac, is(macSrc));
+                break;
+            case ETH_DST:
+                mac = match.get((MatchField<MacAddress>) matchField);
+                assertThat(mac, is(macDst));
+                break;
+            case IP_PROTO:
+                IpProtocol ipProtocol = match.get((MatchField<IpProtocol>) matchField);
+                assertThat(ipProtocol, is(IpProtocol.TCP));
+                break;
+            case IPV4_SRC:
+                IPv4Address ip = match.get((MatchField<IPv4Address>) matchField);
+                assertThat(ip, is(ipSrc));
+                break;
+            case IPV4_DST:
+                ip = match.get((MatchField<IPv4Address>) matchField);
+                assertThat(ip, is(ipDst));
+                break;
+            case TCP_SRC:
+                TransportPort tcp = match.get((MatchField<TransportPort>) matchField);
+                assertThat(tcp, is(tcpSrc));
+                break;
+            case TCP_DST:
+                tcp = match.get((MatchField<TransportPort>) matchField);
+                assertThat(tcp, is(tcpDst));
+                break;
+            default:
+                fail("Unexpected match field returned from iterator");
+            }
+        }
+    }
+    
+    @SuppressWarnings("unchecked")
+    @Test
+    public void iterateArpFields() {
+        MacAddress macSrc = MacAddress.of("00:01:02:03:04:05");
+        MacAddress macDst = MacAddress.of("01:01:02:02:03:03");
+        IPv4Address ipSrc = IPv4Address.of("10.192.20.1");
+        IPv4Address ipDst = IPv4Address.of("10.192.20.2");
+        OFVersion version = factory.getVersion();
+        boolean supportsArpHardwareAddress = (version != OFVersion.OF_10) &&
+                (version != OFVersion.OF_11) && (version != OFVersion.OF_12);
+        int matchFieldCount = 4;
+        Match.Builder builder = factory.buildMatch();
+        builder.setExact(MatchField.ETH_TYPE, EthType.ARP)
+                .setExact(MatchField.ARP_OP, ArpOpcode.REPLY)
+                .setExact(MatchField.ARP_SPA, ipSrc)
+                .setExact(MatchField.ARP_TPA, ipDst);
+        if (supportsArpHardwareAddress) {
+            builder.setExact(MatchField.ARP_SHA, macSrc);
+            builder.setExact(MatchField.ARP_THA, macDst);
+            matchFieldCount += 2;
+        }
+        Match match = builder.build();
+        assertThat(Iterables.size(match.getMatchFields()), is(matchFieldCount));
+        for (MatchField<?> matchField: match.getMatchFields()) {
+            switch (matchField.id) {
+            case ETH_TYPE:
+                EthType ethType = match.get((MatchField<EthType>) matchField);
+                assertThat(ethType, is(EthType.ARP));
+                break;
+            case ARP_OP:
+                ArpOpcode opcode = match.get((MatchField<ArpOpcode>) matchField);
+                assertThat(opcode, is(ArpOpcode.REPLY));
+                break;
+            case ARP_SHA:
+                MacAddress mac = match.get((MatchField<MacAddress>) matchField);
+                assertThat(mac, is(macSrc));
+                break;
+            case ARP_THA:
+                mac = match.get((MatchField<MacAddress>) matchField);
+                assertThat(mac, is(macDst));
+                break;
+            case ARP_SPA:
+                IPv4Address ip = match.get((MatchField<IPv4Address>) matchField);
+                assertThat(ip, is(ipSrc));
+                break;
+            case ARP_TPA:
+                ip = match.get((MatchField<IPv4Address>) matchField);
+                assertThat(ip, is(ipDst));
+                break;
+            default:
+                fail("Unexpected match field returned from iterator");
+            }
+        }
+    }
+    
+    @SuppressWarnings("unchecked")
+    @Test
+    public void iterateMaskedFields() {
+        MacAddress macSrc = MacAddress.of("01:02:03:04:00:00");
+        MacAddress macSrcMask = MacAddress.of("FF:FF:FF:FF:00:00");
+        MacAddress macDst = MacAddress.of("11:22:33:00:00:00");
+        MacAddress macDstMask = MacAddress.of("FF:FF:FF:00:00:00");
+        IPv4Address ipSrc = IPv4Address.of("10.192.20.0");
+        IPv4Address ipSrcMask = IPv4Address.of("255.255.255.0");
+        IPv4Address ipDst = IPv4Address.of("10.192.20.0");
+        IPv4Address ipDstMask = IPv4Address.of("255.255.255.128");
+        TransportPort tcpSrcMask = TransportPort.of(0x01F0);
+        OFVersion version = factory.getVersion();
+        boolean supportsAllMasks = (version != OFVersion.OF_10) &&
+                (version != OFVersion.OF_11) && (version != OFVersion.OF_12);
+        int matchFieldCount = 4;
+        Match.Builder builder = factory.buildMatch()
+                .setExact(MatchField.ETH_TYPE, EthType.IPv4)
+                .setMasked(MatchField.IPV4_SRC, ipSrc, ipSrcMask)
+                .setMasked(MatchField.IPV4_DST, ipDst, ipDstMask)
+                .setExact(MatchField.IP_PROTO, IpProtocol.TCP);
+        if (supportsAllMasks) {
+            builder.setMasked(MatchField.ETH_SRC, macSrc, macSrcMask);
+            builder.setMasked(MatchField.ETH_DST, macDst, macDstMask);
+            builder.setMasked(MatchField.TCP_SRC, tcpSrcMask, tcpSrcMask);
+            matchFieldCount += 3;
+        }
+        Match match = builder.build();
+        assertThat(Iterables.size(match.getMatchFields()), is(matchFieldCount));
+        for (MatchField<?> matchField: match.getMatchFields()) {
+            switch (matchField.id) {
+            case ETH_TYPE:
+                EthType ethType = match.get((MatchField<EthType>) matchField);
+                assertThat(ethType, is(EthType.IPv4));
+                break;
+            case ETH_SRC:
+                Masked<MacAddress> mac = match.getMasked((MatchField<MacAddress>) matchField);
+                assertThat(mac.getValue(), is(macSrc));
+                assertThat(mac.getMask(), is(macSrcMask));
+                break;
+            case ETH_DST:
+                mac = match.getMasked((MatchField<MacAddress>) matchField);
+                assertThat(mac.getValue(), is(macDst));
+                assertThat(mac.getMask(), is(macDstMask));
+                break;
+            case IP_PROTO:
+                IpProtocol ipProtocol = match.get((MatchField<IpProtocol>) matchField);
+                assertThat(ipProtocol, is(IpProtocol.TCP));
+                break;
+            case IPV4_SRC:
+                Masked<IPv4Address> ip = match.getMasked((MatchField<IPv4Address>) matchField);
+                assertThat(ip.getValue(), is(ipSrc));
+                assertThat(ip.getMask(), is(ipSrcMask));
+                break;
+            case IPV4_DST:
+                ip = match.getMasked((MatchField<IPv4Address>) matchField);
+                assertThat(ip.getValue(), is(ipDst));
+                assertThat(ip.getMask(), is(ipDstMask));
+                break;
+            case TCP_SRC:
+                Masked<TransportPort> tcp = match.getMasked((MatchField<TransportPort>) matchField);
+                assertThat(tcp.getValue(), is(tcpSrcMask));
+                assertThat(tcp.getMask(), is(tcpSrcMask));
+                break;
+            default:
+                fail("Unexpected match field returned from iterator");
+            }
+        }
+    }
+}
diff --git a/java_gen/pre-written/src/test/java/org/projectfloodlight/test/TestUtils.java b/java_gen/pre-written/src/test/java/org/projectfloodlight/test/TestUtils.java
new file mode 100644
index 0000000..7a5b8b0
--- /dev/null
+++ b/java_gen/pre-written/src/test/java/org/projectfloodlight/test/TestUtils.java
@@ -0,0 +1,62 @@
+package org.projectfloodlight.test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.Assert;
+
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import com.google.common.primitives.Bytes;
+
+public class TestUtils {
+     private TestUtils() {}
+
+     private static final int PER_LINE = 8;
+
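+     /**
+      * Asserts that two byte arrays are equal. On mismatch, prints the differing
+      * rows as a side-by-side hex dump (PER_LINE bytes per row): the expected
+      * bytes, the actual bytes, and a marker line under the first differing byte,
+      * then fails the test.
+      */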
+     public static void betterAssertArrayEquals(byte[] expected, byte[] got) {
+         int maxlen = Math.max(expected.length, got.length);
+
+         List<String> expectedList = formatHex(Bytes.asList(expected));
+         List<String> gotList = formatHex(Bytes.asList(got));
+
+         boolean fail = false;
+         for (int i = 0; i < maxlen; i += PER_LINE) {
+             int maxThisLine = Math.min(maxlen, i + PER_LINE);
+             boolean print = false;
+
+             ArrayList<String> changeMarkers = new ArrayList<String>();
+
+             for (int j = i; j < maxThisLine; j++) {
+                 if (j >= expected.length || j >= got.length  || expected[j] != got[j]) {
+                     print = true;
+                     fail = true;
+                     changeMarkers.add("==");
+                     break;
+                 } else {
+                     changeMarkers.add("  ");
+                 }
+             }
+             if(print) {
+                System.out.println(String.format("%4x: %s", i, Joiner.on(" ").join(expectedList.subList(i, Math.min(expectedList.size(), i+PER_LINE)))));
+                System.out.println(String.format("%4x: %s", i, Joiner.on(" ").join(gotList.subList(i, Math.min(gotList.size(), i+PER_LINE)))));
+                System.out.println(String.format("%4s  %s", "", Joiner.on(" ").join(changeMarkers)));
+                System.out.println("\n");
+             }
+         }
+         if(fail) {
+             Assert.fail("Array comparison failed");
+         }
+
+     }
+
+     private static List<String> formatHex(List<Byte> b) {
+         return Lists.transform(b, new Function<Byte, String>() {
+             @Override
+             public String apply(Byte input) {
+                 return String.format("%02x", input);
+             }
+         });
+     }
+}
\ No newline at end of file
diff --git a/java_gen/pre-written/src/test/resources/logback-test.xml b/java_gen/pre-written/src/test/resources/logback-test.xml
new file mode 100644
index 0000000..e759962
--- /dev/null
+++ b/java_gen/pre-written/src/test/resources/logback-test.xml
@@ -0,0 +1,13 @@
+<configuration scan="true">
+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} %level [%logger{20}:%thread] %msg%n</pattern>
+    </encoder>
+  </appender>
+  <root level="INFO">
+    <appender-ref ref="STDOUT" />
+  </root>
+  <logger name="org" level="WARN"/>
+  <logger name="LogService" level="WARN"/> <!-- Restlet access logging -->
+  <logger name="org.projectfloodlight.openflow" level="DEBUG"/>
+</configuration>
diff --git a/java_gen/templates/_autogen.java b/java_gen/templates/_autogen.java
new file mode 100644
index 0000000..e1ee006
--- /dev/null
+++ b/java_gen/templates/_autogen.java
@@ -0,0 +1,30 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: import inspect, os
+// Automatically generated by LOXI from template #{os.path.basename(inspect.stack()[3][1])}
+// Do not modify
diff --git a/java_gen/templates/_copyright.java b/java_gen/templates/_copyright.java
new file mode 100644
index 0000000..37d135a
--- /dev/null
+++ b/java_gen/templates/_copyright.java
@@ -0,0 +1,32 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
+// Copyright (c) 2011, 2012 Open Networking Foundation
+// Copyright (c) 2012, 2013 Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler.
+// See the file LICENSE.txt which should have been included in the source distribution
diff --git a/java_gen/templates/_field_accessors.java b/java_gen/templates/_field_accessors.java
new file mode 100644
index 0000000..41ac66f
--- /dev/null
+++ b/java_gen/templates/_field_accessors.java
@@ -0,0 +1,49 @@
+//:: import os
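+//:: # For each property of the message interface, emit an accessor: include a
+//:: # hand-written template from the custom/ template directory when one exists
+//:: # for this message/property, otherwise fall back to the generic getter
+//:: # (and, where applicable, setter) generated below.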
+//:: for prop in msg.interface.members:
+//::    if hasattr(prop, "custom_template") and prop.custom_template != None:
+//::        getter_template_file_name = "%s/custom/%s" % (template_dir, prop.custom_template(builder=builder))
+//::    else:
+//::        getter_template_file_name = "%s/custom/%s_%s.java" % (template_dir, msg.name if not builder else msg.name + '.Builder', prop.getter_name)
+//::    #endif
+//::    if os.path.exists(getter_template_file_name):
+//::        include(getter_template_file_name, msg=msg, builder=builder, has_parent=has_parent, prop=prop)
+//::    else:
+    @Override
+    public ${prop.java_type.public_type} ${prop.getter_name}()${ "" if prop in msg.members else " throws UnsupportedOperationException"} {
+//:: if prop in msg.members:
+//::    version_prop = msg.member_by_name(prop.name)
+//::    if version_prop.is_fixed_value:
+        return ${version_prop.enum_value};
+//::    elif version_prop.is_length_value:
+        // FIXME: Hacky and inefficient way to determine the message length; should be replaced with something better
+        ChannelBuffer c = new LengthCountingPseudoChannelBuffer();
+        WRITER.write(c, ${ "this" if not builder else "({0}) this.getMessage()".format(msg.name) });
+        return c.writerIndex();
+//::    else:
+        return ${version_prop.name};
+//::    #endif
+//:: else:
+        throw new UnsupportedOperationException("Property ${prop.name} not supported in version #{version}");
+//:: #endif
+    }
+//:: #endif
+
+//:: if generate_setters and prop.is_writeable:
+    //:: setter_template_file_name = "%s/custom/%s_%s.java" % (template_dir, msg.name if not builder else msg.name + '.Builder', prop.setter_name)
+    //:: if os.path.exists(setter_template_file_name):
+    //:: include(setter_template_file_name, msg=msg, builder=builder, has_parent=has_parent)
+
+    //:: else:
+    @Override
+    public ${msg.interface.name}.Builder ${prop.setter_name}(${prop.java_type.public_type} ${prop.name})${ "" if prop in msg.members else " throws UnsupportedOperationException"} {
+        //:: if prop in msg.members:
+        this.${prop.name} = ${prop.name};
+        this.${prop.name}Set = true;
+        return this;
+        //:: else:
+            throw new UnsupportedOperationException("Property ${prop.name} not supported in version #{version}");
+        //:: #endif
+    }
+    //:: #endif
+    //:: #endif
+//:: #endfor
diff --git a/java_gen/templates/_imports.java b/java_gen/templates/_imports.java
new file mode 100644
index 0000000..0c95916
--- /dev/null
+++ b/java_gen/templates/_imports.java
@@ -0,0 +1,31 @@
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.Map;
+import org.projectfloodlight.openflow.protocol.*;
+import org.projectfloodlight.openflow.protocol.action.*;
+import org.projectfloodlight.openflow.protocol.actionid.*;
+import org.projectfloodlight.openflow.protocol.bsntlv.*;
+import org.projectfloodlight.openflow.protocol.errormsg.*;
+import org.projectfloodlight.openflow.protocol.meterband.*;
+import org.projectfloodlight.openflow.protocol.instruction.*;
+import org.projectfloodlight.openflow.protocol.instructionid.*;
+import org.projectfloodlight.openflow.protocol.match.*;
+import org.projectfloodlight.openflow.protocol.oxm.*;
+import org.projectfloodlight.openflow.protocol.queueprop.*;
+import org.projectfloodlight.openflow.types.*;
+import org.projectfloodlight.openflow.util.*;
+import org.projectfloodlight.openflow.exceptions.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import com.google.common.collect.UnmodifiableIterator;
+import com.google.common.hash.Funnel;
+import com.google.common.hash.PrimitiveSink;
diff --git a/java_gen/templates/_singleton.java b/java_gen/templates/_singleton.java
new file mode 100644
index 0000000..8ea9f01
--- /dev/null
+++ b/java_gen/templates/_singleton.java
@@ -0,0 +1,10 @@
+
+    private ${msg.name}() {}
+
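+    // Lazily-initialized, thread-safe singleton via the initialization-on-demand
+    // holder idiom: Holder (and hence INSTANCE) is only loaded when getInstance()
+    // is first called.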
+    private final static class Holder {
+        private static final ${msg.name} INSTANCE = new ${msg.name}();
+    }
+
+    public static ${msg.name} getInstance() {
+        return Holder.INSTANCE;
+    }
diff --git a/java_gen/templates/const.java b/java_gen/templates/const.java
new file mode 100644
index 0000000..a7786f4
--- /dev/null
+++ b/java_gen/templates/const.java
@@ -0,0 +1,75 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: import itertools
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package ${package};
+
+//:: include("_imports.java", msg=enum)
+
+public enum ${class_name} {
+//:: for i, entry in enumerate(enum.entries):
+//::    if enum.metadata.properties:
+//::        params = "({})".format(", ".join(entry.constructor_params))
+//::    else:
+//::        params = ""
+//::    #endif
+//::    delimiter = ", " if i < len(enum.entries)-1 else ";"
+//::    to_string_value = enum.metadata.to_string(entry) if enum.metadata.to_string else None
+//::    if to_string_value:
+     ${entry.name}${params} {
+         @Override
+         public String toString() {
+            return "${to_string_value}";
+         }
+     }${delimiter}
+//::    else:
+     ${entry.name}${params}${delimiter}
+//::    #endif
+//:: #endfor
+//:: if enum.metadata.properties:
+
+//:: for property_metadata in enum.metadata.properties:
+     private final ${property_metadata.type.public_type} ${property_metadata.variable_name};
+//:: #endfor
+
+     private ${class_name}(${", ".join("{} {}".format(m.type.public_type, m.variable_name) for m in enum.metadata.properties)}) {
+     //:: for property_metadata in enum.metadata.properties:
+        this.${property_metadata.variable_name} = ${property_metadata.variable_name};
+     //:: #endfor
+     }
+//:: for property_metadata in enum.metadata.properties:
+
+     public ${property_metadata.type.public_type} ${property_metadata.getter_name}() {
+         return ${property_metadata.variable_name};
+     }
+//:: #endfor
+//:: #endif
+}
diff --git a/java_gen/templates/const_serializer.java b/java_gen/templates/const_serializer.java
new file mode 100644
index 0000000..03ca8ac
--- /dev/null
+++ b/java_gen/templates/const_serializer.java
@@ -0,0 +1,86 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: import itertools
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package ${package};
+
+//:: include('_imports.java')
+import ${enum.package}.${enum.name};
+
+public class ${class_name} {
+    //:: wire_type = enum.wire_type(version)
+    //:: int_wire_type = enum.wire_type(version).pub_type
+    //:: entries = sorted([ (entry, entry.value(version)) for entry in enum.entries if entry.has_value(version) ], lambda (_,va), (_2,vb): va.__cmp__(vb))
+
+    //:: for entry, _ in entries:
+    public final static ${int_wire_type} ${entry.name}_VAL = ${entry.format_value(version)};
+    //:: #endfor
+
+    public static ${enum.name} readFrom(ChannelBuffer bb) throws OFParseError {
+        try {
+            return ofWireValue(${wire_type.read_op(version)});
+        } catch (IllegalArgumentException e) {
+            throw new OFParseError(e);
+        }
+    }
+
+    public static void writeTo(ChannelBuffer bb, ${enum.name} e) {
+        ${wire_type.write_op(version=version, name="toWireValue(e)")};
+    }
+
+    public static void putTo(${enum.name} e, PrimitiveSink sink) {
+        ${wire_type.funnel_op(version=version, name="toWireValue(e)")};
+    }
+
+    public static ${enum.name} ofWireValue(${int_wire_type} val) {
+        switch(val) {
+        //:: for entry, _ in entries:
+            case ${entry.name}_VAL:
+                return ${enum.name}.${entry.name};
+        //:: #endfor
+            default:
+                throw new IllegalArgumentException("Illegal wire value for type ${enum.name} in version ${version}: " + val);
+        }
+    }
+
+
+    public static ${int_wire_type} toWireValue(${enum.name} e) {
+        switch(e) {
+        //:: for entry, _ in entries:
+            case ${entry.name}:
+                return ${entry.name}_VAL;
+        //:: #endfor
+            default:
+                throw new IllegalArgumentException("Illegal enum value for type ${enum.name} in version ${version}: " + e);
+        }
+    }
+
+}
diff --git a/java_gen/templates/const_set_serializer.java b/java_gen/templates/const_set_serializer.java
new file mode 100644
index 0000000..61592a6
--- /dev/null
+++ b/java_gen/templates/const_set_serializer.java
@@ -0,0 +1,107 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: import itertools
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package ${package};
+
+//:: include('_imports.java')
+
+import ${enum.package}.${enum.name};
+
+public class ${class_name} {
+    //:: wire_type = enum.wire_type(version)
+    //:: int_wire_type = enum.wire_type(version).pub_type
+    //:: entries = [entry for entry in enum.entries if entry.has_value(version) ]
+
+    //:: for entry in entries:
+    public final static ${int_wire_type} ${entry.name}_VAL = ${entry.format_value(version)};
+    //:: #endfor
+
+    public static Set<${enum.name}> readFrom(ChannelBuffer bb) throws OFParseError {
+        try {
+            return ofWireValue(${wire_type.read_op(version)});
+        } catch (IllegalArgumentException e) {
+            throw new OFParseError(e);
+        }
+    }
+
+    public static void writeTo(ChannelBuffer bb, Set<${enum.name}> set) {
+        ${wire_type.write_op(version=version, name="toWireValue(set)")};
+    }
+
+    public static void putTo(Set<${enum.name}> set, PrimitiveSink sink) {
+        ${wire_type.funnel_op(version=version, name="toWireValue(set)")};
+    }
+
+
+    public static Set<${enum.name}> ofWireValue(${int_wire_type} val) {
+        EnumSet<${enum.name}> set = EnumSet.noneOf(${enum.name}.class);
+
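+        // Entries that belong to a masked enum group are decoded by comparing the
+        // bits selected by the group mask against the entry's value, so at most
+        // one constant per group is added; standalone flag entries are tested as
+        // individual bits.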
+        //:: last_group = None
+        //:: for entry in entries:
+        //::    if entry.is_mask:
+        //::        continue
+        //::    #endif
+        //::    group = entry.masked_enum_group
+        //::    if group:
+        ${"else " if group == last_group else "" }if((val & ${group.mask}_VAL) == ${entry.name}_VAL)
+            set.add(${enum.name}.${entry.name});
+        //::       last_group = group
+        //::    else:
+        if((val & ${entry.name}_VAL) != 0)
+            set.add(${enum.name}.${entry.name});
+        //::       last_group = None
+        //::    #endif
+        //:: #endfor
+        return Collections.unmodifiableSet(set);
+    }
+
+    public static ${int_wire_type} toWireValue(Set<${enum.name}> set) {
+        ${int_wire_type} wireValue = 0;
+
+        for(${enum.name} e: set) {
+            switch(e) {
+                //:: for entry in entries:
+                //::    if entry.is_mask:
+                //::        continue
+                //::    #endif
+                case ${entry.name}:
+                    wireValue |= ${entry.name}_VAL;
+                    break;
+                //:: #endfor
+                default:
+                    throw new IllegalArgumentException("Illegal enum value for type ${enum.name} in version ${version}: " + e);
+            }
+        }
+        return wireValue;
+    }
+
+}
diff --git a/java_gen/templates/custom/OFFlowAddVer13.Builder_getActions.java b/java_gen/templates/custom/OFFlowAddVer13.Builder_getActions.java
new file mode 100644
index 0000000..ce71981
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowAddVer13.Builder_getActions.java
@@ -0,0 +1,19 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+//:: if builder:
+        if (!this.instructionsSet)
+//:: if has_parent:
+            return parentMessage.getActions();
+//:: else:
+            return Collections.emptyList();
+//:: #endif
+//:: #endif
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowAddVer13.Builder_setActions.java b/java_gen/templates/custom/OFFlowAddVer13.Builder_setActions.java
new file mode 100644
index 0000000..725de24
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowAddVer13.Builder_setActions.java
@@ -0,0 +1,9 @@
+
+    @Override
+    public OFFlowAdd.Builder setActions(List<OFAction> actions) throws UnsupportedOperationException {
+        OFInstructionApplyActionsVer13.Builder builder = new OFInstructionApplyActionsVer13.Builder();
+        builder.setActions(actions);
+        this.instructions = Collections.singletonList((OFInstruction)builder.build());
+        this.instructionsSet = true;
+        return this;
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowAddVer13_getActions.java b/java_gen/templates/custom/OFFlowAddVer13_getActions.java
new file mode 100644
index 0000000..eb7799a
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowAddVer13_getActions.java
@@ -0,0 +1,11 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowDeleteStrictVer13.Builder_getActions.java b/java_gen/templates/custom/OFFlowDeleteStrictVer13.Builder_getActions.java
new file mode 100644
index 0000000..ce71981
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowDeleteStrictVer13.Builder_getActions.java
@@ -0,0 +1,19 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+//:: if builder:
+        if (!this.instructionsSet)
+//:: if has_parent:
+            return parentMessage.getActions();
+//:: else:
+            return Collections.emptyList();
+//:: #endif
+//:: #endif
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowDeleteStrictVer13.Builder_setActions.java b/java_gen/templates/custom/OFFlowDeleteStrictVer13.Builder_setActions.java
new file mode 100644
index 0000000..b2dafe5
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowDeleteStrictVer13.Builder_setActions.java
@@ -0,0 +1,9 @@
+
+    @Override
+    public OFFlowDeleteStrict.Builder setActions(List<OFAction> actions) throws UnsupportedOperationException {
+        OFInstructionApplyActionsVer13.Builder builder = new OFInstructionApplyActionsVer13.Builder();
+        builder.setActions(actions);
+        this.instructions = Collections.singletonList((OFInstruction)builder.build());
+        this.instructionsSet = true;
+        return this;
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowDeleteStrictVer13_getActions.java b/java_gen/templates/custom/OFFlowDeleteStrictVer13_getActions.java
new file mode 100644
index 0000000..eb7799a
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowDeleteStrictVer13_getActions.java
@@ -0,0 +1,11 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowDeleteVer13.Builder_getActions.java b/java_gen/templates/custom/OFFlowDeleteVer13.Builder_getActions.java
new file mode 100644
index 0000000..ce71981
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowDeleteVer13.Builder_getActions.java
@@ -0,0 +1,19 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+//:: if builder:
+        if (!this.instructionsSet)
+//:: if has_parent:
+            return parentMessage.getActions();
+//:: else:
+            return Collections.emptyList();
+//:: #endif
+//:: #endif
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowDeleteVer13.Builder_setActions.java b/java_gen/templates/custom/OFFlowDeleteVer13.Builder_setActions.java
new file mode 100644
index 0000000..5576aaa
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowDeleteVer13.Builder_setActions.java
@@ -0,0 +1,9 @@
+
+    @Override
+    public OFFlowDelete.Builder setActions(List<OFAction> actions) throws UnsupportedOperationException {
+        OFInstructionApplyActionsVer13.Builder builder = new OFInstructionApplyActionsVer13.Builder();
+        builder.setActions(actions);
+        this.instructions = Collections.singletonList((OFInstruction)builder.build());
+        this.instructionsSet = true;
+        return this;
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowDeleteVer13_getActions.java b/java_gen/templates/custom/OFFlowDeleteVer13_getActions.java
new file mode 100644
index 0000000..eb7799a
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowDeleteVer13_getActions.java
@@ -0,0 +1,11 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowModifyStrictVer13.Builder_getActions.java b/java_gen/templates/custom/OFFlowModifyStrictVer13.Builder_getActions.java
new file mode 100644
index 0000000..ce71981
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowModifyStrictVer13.Builder_getActions.java
@@ -0,0 +1,19 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+//:: if builder:
+        if (!this.instructionsSet)
+//:: if has_parent:
+            return parentMessage.getActions();
+//:: else:
+            return Collections.emptyList();
+//:: #endif
+//:: #endif
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowModifyStrictVer13.Builder_setActions.java b/java_gen/templates/custom/OFFlowModifyStrictVer13.Builder_setActions.java
new file mode 100644
index 0000000..fc04079
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowModifyStrictVer13.Builder_setActions.java
@@ -0,0 +1,9 @@
+
+    @Override
+    public OFFlowModifyStrict.Builder setActions(List<OFAction> actions) throws UnsupportedOperationException {
+        OFInstructionApplyActionsVer13.Builder builder = new OFInstructionApplyActionsVer13.Builder();
+        builder.setActions(actions);
+        this.instructions = Collections.singletonList((OFInstruction)builder.build());
+        this.instructionsSet = true;
+        return this;
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowModifyStrictVer13_getActions.java b/java_gen/templates/custom/OFFlowModifyStrictVer13_getActions.java
new file mode 100644
index 0000000..eb7799a
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowModifyStrictVer13_getActions.java
@@ -0,0 +1,11 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowModifyVer13.Builder_getActions.java b/java_gen/templates/custom/OFFlowModifyVer13.Builder_getActions.java
new file mode 100644
index 0000000..ce71981
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowModifyVer13.Builder_getActions.java
@@ -0,0 +1,19 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+//:: if builder:
+        if (!this.instructionsSet)
+//:: if has_parent:
+            return parentMessage.getActions();
+//:: else:
+            return Collections.emptyList();
+//:: #endif
+//:: #endif
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowModifyVer13.Builder_setActions.java b/java_gen/templates/custom/OFFlowModifyVer13.Builder_setActions.java
new file mode 100644
index 0000000..695b771
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowModifyVer13.Builder_setActions.java
@@ -0,0 +1,9 @@
+
+    @Override
+    public OFFlowModify.Builder setActions(List<OFAction> actions) throws UnsupportedOperationException {
+        OFInstructionApplyActionsVer13.Builder builder = new OFInstructionApplyActionsVer13.Builder();
+        builder.setActions(actions);
+        this.instructions = Collections.singletonList((OFInstruction)builder.build());
+        this.instructionsSet = true;
+        return this;
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFFlowModifyVer13_getActions.java b/java_gen/templates/custom/OFFlowModifyVer13_getActions.java
new file mode 100644
index 0000000..eb7799a
--- /dev/null
+++ b/java_gen/templates/custom/OFFlowModifyVer13_getActions.java
@@ -0,0 +1,11 @@
+
+    @Override
+    public List<OFAction> getActions()throws UnsupportedOperationException {
+        for (OFInstruction inst : this.instructions) {
+            if (inst instanceof OFInstructionApplyActions) {
+                OFInstructionApplyActions iap = (OFInstructionApplyActions)inst;
+                return iap.getActions();
+            }
+        }
+        return Collections.emptyList();
+    }
\ No newline at end of file
diff --git a/java_gen/templates/custom/OFMatchV1Ver10.Builder.java b/java_gen/templates/custom/OFMatchV1Ver10.Builder.java
new file mode 100644
index 0000000..396e3a0
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV1Ver10.Builder.java
@@ -0,0 +1,502 @@
+        @SuppressWarnings("unchecked")
+        @Override
+        public <F extends OFValueType<F>> F get(MatchField<F> field)
+                throws UnsupportedOperationException {
+            if (isFullyWildcarded(field))
+                return null;
+
+            Object result;
+            switch (field.id) {
+                case IN_PORT:
+                    result = inPort;
+                    break;
+                case ETH_DST:
+                    result = ethDst;
+                    break;
+                case ETH_SRC:
+                    result = ethSrc;
+                    break;
+                case ETH_TYPE:
+                    result = ethType;
+                    break;
+                case VLAN_VID:
+                    result = vlanVid;
+                    break;
+                case VLAN_PCP:
+                    result = vlanPcp;
+                    break;
+                case ARP_OP:
+                    result = ArpOpcode.of(ipProto.getIpProtocolNumber());
+                    break;
+                case ARP_SPA:
+                    result = ipv4Src;
+                    break;
+                case ARP_TPA:
+                    result = ipv4Dst;
+                    break;
+                case IP_DSCP:
+                    result = ipDscp;
+                    break;
+                case IP_PROTO:
+                    result = ipProto;
+                    break;
+                case IPV4_SRC:
+                    result = ipv4Src;
+                    break;
+                case IPV4_DST:
+                    result = ipv4Dst;
+                    break;
+                case TCP_SRC:
+                    result = tcpSrc;
+                    break;
+                case TCP_DST:
+                    result = tcpDst;
+                    break;
+                case UDP_SRC:
+                    result = tcpSrc;
+                    break;
+                case UDP_DST:
+                    result = tcpDst;
+                    break;
+                case SCTP_SRC:
+                    result = tcpSrc;
+                    break;
+                case SCTP_DST:
+                    result = tcpDst;
+                    break;
+                case ICMPV4_TYPE:
+                    result = tcpSrc;
+                    break;
+                case ICMPV4_CODE:
+                    result = tcpDst;
+                    break;
+                // NOT SUPPORTED:
+                default:
+                    throw new UnsupportedOperationException("OFMatch does not support matching on field " + field.getName());
+            }
+            return (F)result;
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field)
+                throws UnsupportedOperationException {
+            if (!isPartiallyMasked(field))
+                return null;
+            Object result;
+            switch (field.id) {
+                case IPV4_SRC:
+                case ARP_SPA:
+                    int srcBitMask = (-1) << (32 - getIpv4SrcCidrMaskLen());
+                    result = IPv4AddressWithMask.of(ipv4Src, IPv4Address.of(srcBitMask));
+                    break;
+                case IPV4_DST:
+                case ARP_TPA:
+                    int dstBitMask = (-1) << (32 - getIpv4DstCidrMaskLen());
+                    result = IPv4AddressWithMask.of(ipv4Dst, IPv4Address.of(dstBitMask));
+                    break;
+                default:
+                    throw new UnsupportedOperationException("OFMatch does not support masked matching on field " + field.getName());
+            }
+            return (Masked<F>)result;
+        }
+
+        @Override
+        public boolean supports(MatchField<?> field) {
+            switch (field.id) {
+                case IN_PORT:
+                case ETH_DST:
+                case ETH_SRC:
+                case ETH_TYPE:
+                case VLAN_VID:
+                case VLAN_PCP:
+                case ARP_OP:
+                case ARP_SPA:
+                case ARP_TPA:
+                case IP_DSCP:
+                case IP_PROTO:
+                case IPV4_SRC:
+                case IPV4_DST:
+                case TCP_SRC:
+                case TCP_DST:
+                case UDP_SRC:
+                case UDP_DST:
+                case SCTP_SRC:
+                case SCTP_DST:
+                case ICMPV4_TYPE:
+                case ICMPV4_CODE:
+                    return true;
+                default:
+                    return false;
+            }
+        }
+
+        @Override
+        public boolean supportsMasked(MatchField<?> field) {
+            switch (field.id) {
+                case ARP_SPA:
+                case ARP_TPA:
+                case IPV4_SRC:
+                case IPV4_DST:
+                    return true;
+                default:
+                    return false;
+            }
+        }
+
+        @Override
+        public boolean isExact(MatchField<?> field) {
+            switch (field.id) {
+                case IN_PORT:
+                    return (this.wildcards & OFPFW_IN_PORT) == 0;
+                case ETH_DST:
+                    return (this.wildcards & OFPFW_DL_DST) == 0;
+                case ETH_SRC:
+                    return (this.wildcards & OFPFW_DL_SRC) == 0;
+                case ETH_TYPE:
+                    return (this.wildcards & OFPFW_DL_TYPE) == 0;
+                case VLAN_VID:
+                    return (this.wildcards & OFPFW_DL_VLAN) == 0;
+                case VLAN_PCP:
+                    return (this.wildcards & OFPFW_DL_VLAN_PCP) == 0;
+                case ARP_OP:
+                    return (this.wildcards & OFPFW_NW_PROTO) == 0;
+                case ARP_SPA:
+                    return this.getIpv4SrcCidrMaskLen() >= 32;
+                case ARP_TPA:
+                    return this.getIpv4DstCidrMaskLen() >= 32;
+                case IP_DSCP:
+                    return (this.wildcards & OFPFW_NW_TOS) == 0;
+                case IP_PROTO:
+                    return (this.wildcards & OFPFW_NW_PROTO) == 0;
+                case IPV4_SRC:
+                    return this.getIpv4SrcCidrMaskLen() >= 32;
+                case IPV4_DST:
+                    return this.getIpv4DstCidrMaskLen() >= 32;
+                case TCP_SRC:
+                    return (this.wildcards & OFPFW_TP_SRC) == 0;
+                case TCP_DST:
+                    return (this.wildcards & OFPFW_TP_DST) == 0;
+                case UDP_SRC:
+                    return (this.wildcards & OFPFW_TP_SRC) == 0;
+                case UDP_DST:
+                    return (this.wildcards & OFPFW_TP_DST) == 0;
+                case SCTP_SRC:
+                    return (this.wildcards & OFPFW_TP_SRC) == 0;
+                case SCTP_DST:
+                    return (this.wildcards & OFPFW_TP_DST) == 0;
+                case ICMPV4_TYPE:
+                    return (this.wildcards & OFPFW_TP_SRC) == 0;
+                case ICMPV4_CODE:
+                    return (this.wildcards & OFPFW_TP_DST) == 0;
+                default:
+                    throw new UnsupportedOperationException("OFMatch does not support matching on field " + field.getName());
+            }
+        }
+
+        /**
+         * Parse this match's wildcard fields and return the number of significant
+         * bits in the IP destination field. NOTE: this returns the number of bits
+         * that are fixed, i.e., like CIDR, not the number of bits that are free
+         * like OpenFlow encodes.
+         *
+         * @return A number between 0 (matches all IPs) and 32 (exact match)
+         */
+        public int getIpv4DstCidrMaskLen() {
+            return Math.max(32 - ((wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT),
+                            0);
+        }
+
+        /**
+         * Parse this match's wildcard fields and return the number of significant
+         * bits in the IP source field. NOTE: this returns the number of bits
+         * that are fixed, i.e., like CIDR, not the number of bits that are free
+         * like OpenFlow encodes.
+         *
+         * @return A number between 0 (matches all IPs) and 32 (exact match)
+         */
+        public int getIpv4SrcCidrMaskLen() {
+            return Math.max(32 - ((wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT),
+                            0);
+        }
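+
+        // Worked example of the arithmetic above (illustrative values only): the
+        // OFPFW_NW_SRC/OFPFW_NW_DST wildcard field stores how many low-order bits
+        // of the address are wildcarded, so a stored value of 8 yields a CIDR
+        // prefix length of 32 - 8 = 24 (a /24 match), while stored values of 32
+        // or more clamp to 0 and match all IPv4 addresses.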
+
+
+        @Override
+        public boolean isFullyWildcarded(MatchField<?> field) {
+            switch (field.id) {
+                case IN_PORT:
+                    return (this.wildcards & OFPFW_IN_PORT) != 0;
+                case ETH_DST:
+                    return (this.wildcards & OFPFW_DL_DST) != 0;
+                case ETH_SRC:
+                    return (this.wildcards & OFPFW_DL_SRC) != 0;
+                case ETH_TYPE:
+                    return (this.wildcards & OFPFW_DL_TYPE) != 0;
+                case VLAN_VID:
+                    return (this.wildcards & OFPFW_DL_VLAN) != 0;
+                case VLAN_PCP:
+                    return (this.wildcards & OFPFW_DL_VLAN_PCP) != 0;
+                case ARP_OP:
+                    return (this.wildcards & OFPFW_NW_PROTO) != 0;
+                case ARP_SPA:
+                    return this.getIpv4SrcCidrMaskLen() <= 0;
+                case ARP_TPA:
+                    return this.getIpv4DstCidrMaskLen() <= 0;
+                case IP_DSCP:
+                    return (this.wildcards & OFPFW_NW_TOS) != 0;
+                case IP_PROTO:
+                    return (this.wildcards & OFPFW_NW_PROTO) != 0;
+                case TCP_SRC:
+                    return (this.wildcards & OFPFW_TP_SRC) != 0;
+                case TCP_DST:
+                    return (this.wildcards & OFPFW_TP_DST) != 0;
+                case UDP_SRC:
+                    return (this.wildcards & OFPFW_TP_SRC) != 0;
+                case UDP_DST:
+                    return (this.wildcards & OFPFW_TP_DST) != 0;
+                case SCTP_SRC:
+                    return (this.wildcards & OFPFW_TP_SRC) != 0;
+                case SCTP_DST:
+                    return (this.wildcards & OFPFW_TP_DST) != 0;
+                case ICMPV4_TYPE:
+                    return (this.wildcards & OFPFW_TP_SRC) != 0;
+                case ICMPV4_CODE:
+                    return (this.wildcards & OFPFW_TP_DST) != 0;
+                case IPV4_SRC:
+                    return this.getIpv4SrcCidrMaskLen() <= 0;
+                case IPV4_DST:
+                    return this.getIpv4DstCidrMaskLen() <= 0;
+                default:
+                    throw new UnsupportedOperationException("OFMatch does not support matching on field " + field.getName());
+            }
+        }
+
+        @Override
+        public boolean isPartiallyMasked(MatchField<?> field) {
+            switch (field.id) {
+                case ARP_SPA:
+                case IPV4_SRC:
+                    int srcCidrLen = getIpv4SrcCidrMaskLen();
+                    return srcCidrLen > 0 && srcCidrLen < 32;
+                case ARP_TPA:
+                case IPV4_DST:
+                    int dstCidrLen = getIpv4DstCidrMaskLen();
+                    return dstCidrLen > 0 && dstCidrLen < 32;
+                default:
+                    throw new UnsupportedOperationException("OFMatch does not support masked matching on field " + field.getName());
+            }
+        }
+
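+        // Wildcards are initialized lazily on the first setter call: they start
+        // from the parent message's wildcards when this builder was created from
+        // an existing match, and from OFPFW_ALL (fully wildcarded) otherwise.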
+        private final void initWildcards() {
+            if(!wildcardsSet) {
+            //:: if has_parent:
+                wildcards = parentMessage.wildcards;
+            //:: else:
+                wildcards = OFPFW_ALL;
+            //:: #endif
+                wildcardsSet = true;
+            }
+        }
+
+        @Override
+        public <F extends OFValueType<F>> Match.Builder setExact(MatchField<F> field,
+                F value) {
+            initWildcards();
+            switch (field.id) {
+                case ETH_DST:
+                    setEthDst((MacAddress) value);
+                    wildcards &= ~OFPFW_DL_DST;
+                    break;
+                case ETH_SRC:
+                    setEthSrc((MacAddress) value);
+                    wildcards &= ~OFPFW_DL_SRC;
+                    break;
+                case ETH_TYPE:
+                    setEthType((EthType) value);
+                    wildcards &= ~OFPFW_DL_TYPE;
+                    break;
+                case ICMPV4_CODE:
+                    setTcpDst((TransportPort) value);
+                    wildcards &= ~OFPFW_TP_DST;
+                    break;
+                case ICMPV4_TYPE:
+                    setTcpSrc((TransportPort) value);
+                    wildcards &= ~OFPFW_TP_SRC;
+                    break;
+                case IN_PORT:
+                    setInPort((OFPort) value);
+                    wildcards &= ~OFPFW_IN_PORT;
+                    break;
+                case ARP_OP:
+                    setIpProto(IpProtocol.of((short)((ArpOpcode)value).getOpcode()));
+                    wildcards &= ~OFPFW_NW_PROTO;
+                    break;
+                case ARP_TPA:
+                case IPV4_DST:
+                    setIpv4Dst((IPv4Address) value);
+                    wildcards &= ~OFPFW_NW_DST_MASK;
+                    break;
+                case ARP_SPA:
+                case IPV4_SRC:
+                    setIpv4Src((IPv4Address) value);
+                    wildcards &= ~OFPFW_NW_SRC_MASK;
+                    break;
+                case IP_DSCP:
+                    setIpDscp((IpDscp) value);
+                    wildcards &= ~OFPFW_NW_TOS;
+                    break;
+                case IP_PROTO:
+                    setIpProto((IpProtocol) value);
+                    wildcards &= ~OFPFW_NW_PROTO;
+                    break;
+                case SCTP_DST:
+                    setTcpDst((TransportPort) value);
+                    wildcards &= ~OFPFW_TP_DST;
+                    break;
+                case SCTP_SRC:
+                    setTcpSrc((TransportPort) value);
+                    wildcards &= ~OFPFW_TP_SRC;
+                    break;
+                case TCP_DST:
+                    setTcpDst((TransportPort) value);
+                    wildcards &= ~OFPFW_TP_DST;
+                    break;
+                case TCP_SRC:
+                    setTcpSrc((TransportPort) value);
+                    wildcards &= ~OFPFW_TP_SRC;
+                    break;
+                case UDP_DST:
+                    setTcpDst((TransportPort) value);
+                    wildcards &= ~OFPFW_TP_DST;
+                    break;
+                case UDP_SRC:
+                    setTcpSrc((TransportPort) value);
+                    wildcards &= ~OFPFW_TP_SRC;
+                    break;
+                case VLAN_PCP:
+                    setVlanPcp((VlanPcp) value);
+                    wildcards &= ~OFPFW_DL_VLAN_PCP;
+                    break;
+                case VLAN_VID:
+                    setVlanVid((OFVlanVidMatch) value);
+                    wildcards &= ~OFPFW_DL_VLAN;
+                    break;
+                default:
+                    throw new UnsupportedOperationException(
+                            "OFMatch does not support matching on field " + field.getName());
+            }
+            return this;
+        }
+
+        @Override
+        public <F extends OFValueType<F>> Match.Builder setMasked(MatchField<F> field,
+                F value, F mask) {
+            initWildcards();
+            switch (field.id) {
+                case ARP_SPA:
+                case ARP_TPA:
+                case IPV4_DST:
+                case IPV4_SRC:
+                    Object valObj = value;
+                    Object masObj = mask;
+                    IPv4Address ip = ((IPv4Address)valObj);
+                    int maskval = ((IPv4Address)masObj).getInt();
+                    if (Integer.bitCount(~maskval + 1) != 1)
+                        throw new UnsupportedOperationException("OFMatch only supports CIDR masks for IPv4");
+                    int maskLen = 32 - Integer.bitCount(maskval);
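+                    // Worked example (illustrative only): for a 255.255.255.0 mask, maskval is
+                    // 0xFFFFFF00, ~maskval + 1 == 0x100 (a single bit, so the CIDR check passes),
+                    // Integer.bitCount(maskval) == 24 and maskLen == 8 wildcarded bits. A
+                    // non-contiguous mask such as 0xFFFF00FF fails the bitCount check above.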
+                    switch(field.id) {
+                        case ARP_TPA:
+                        case IPV4_DST:
+                            setIpv4Dst(ip);
+                            wildcards = (wildcards &~OFPFW_NW_DST_MASK) | (maskLen << OFPFW_NW_DST_SHIFT);
+                            break;
+                        case ARP_SPA:
+                        case IPV4_SRC:
+                            setIpv4Src(ip);
+                            wildcards = (wildcards &~OFPFW_NW_SRC_MASK) | (maskLen << OFPFW_NW_SRC_SHIFT);
+                            break;
+                        default:
+                            // Cannot really get here
+                            break;
+                    }
+                    break;
+                default:
+                    throw new UnsupportedOperationException("OFMatch does not support masked matching on field " + field.getName());
+            }
+            return this;
+        }
+
+        @Override
+        public <F extends OFValueType<F>> Match.Builder setMasked(MatchField<F> field, Masked<F> valueWithMask)
+                                                                       throws UnsupportedOperationException {
+            return this.setMasked(field, valueWithMask.getValue(), valueWithMask.getMask());
+        }
+
+        @Override
+        public <F extends OFValueType<F>> Match.Builder wildcard(MatchField<F> field) {
+            initWildcards();
+            switch (field.id) {
+                case ETH_DST:
+                    setEthDst(MacAddress.NONE);
+                    wildcards |= OFPFW_DL_DST;
+                    break;
+                case ETH_SRC:
+                    setEthSrc(MacAddress.NONE);
+                    wildcards |= OFPFW_DL_SRC;
+                    break;
+                case ETH_TYPE:
+                    setEthType(EthType.NONE);
+                    wildcards |= OFPFW_DL_TYPE;
+                    break;
+                case ICMPV4_CODE:
+                case TCP_DST:
+                case UDP_DST:
+                case SCTP_DST:
+                    setTcpDst(TransportPort.NONE);
+                    wildcards |= OFPFW_TP_DST;
+                    break;
+                case ICMPV4_TYPE:
+                case TCP_SRC:
+                case UDP_SRC:
+                case SCTP_SRC:
+                    setTcpSrc(TransportPort.NONE);
+                    wildcards |= OFPFW_TP_SRC;
+                    break;
+                case IN_PORT:
+                    setInPort(OFPort.of(0)); // NOTE: not 'NONE' -- that is 0xFF for ports
+                    wildcards |= OFPFW_IN_PORT;
+                    break;
+                case ARP_TPA:
+                case IPV4_DST:
+                    setIpv4Dst(IPv4Address.NONE);
+                    wildcards |= OFPFW_NW_DST_MASK;
+                    break;
+                case ARP_SPA:
+                case IPV4_SRC:
+                    setIpv4Src(IPv4Address.NONE);
+                    wildcards |= OFPFW_NW_SRC_MASK;
+                    break;
+                case IP_DSCP:
+                    setIpDscp(IpDscp.NONE);
+                    wildcards |= OFPFW_NW_TOS;
+                    break;
+                case IP_PROTO:
+                    setIpProto(IpProtocol.NONE);
+                    wildcards |= OFPFW_NW_PROTO;
+                    break;
+                case VLAN_PCP:
+                    setVlanPcp(VlanPcp.NONE);
+                    wildcards |= OFPFW_DL_VLAN_PCP;
+                    break;
+                case VLAN_VID:
+                    setVlanVid(OFVlanVidMatch.NONE);
+                    wildcards |= OFPFW_DL_VLAN;
+                    break;
+                default:
+                    throw new UnsupportedOperationException("OFMatch does not support matching on field " + field.getName());
+            }
+            return this;
+        }
diff --git a/java_gen/templates/custom/OFMatchV1Ver10.Builder_normalize_clear_wildcards_stanza.java b/java_gen/templates/custom/OFMatchV1Ver10.Builder_normalize_clear_wildcards_stanza.java
new file mode 100644
index 0000000..9528ec0
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV1Ver10.Builder_normalize_clear_wildcards_stanza.java
@@ -0,0 +1,53 @@
+            // Normalize wildcard fields to mimic old Open vSwitch behavior. When the prerequisites
+            // for a field were not met (e.g., eth_type not set to 0x800), old OVS would set the value
+            // of the corresponding ignored fields (e.g., ip_src, tcp_dst) to 0 AND ALSO CLEAR THE
+            // WILDCARD bit. OVS no longer does this as of 1.1.2 and 1.4.
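+            // Illustrative example (not part of the generated logic): for an ARP match the
+            // branch below zeroes ipDscp/tcpSrc/tcpDst and clears their wildcard bits, which
+            // is the old OVS wire encoding described above.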
+            if(ethType.equals(EthType.IPv4)) {
+                // IP
+                if(ipProto.equals(IpProtocol.TCP) || ipProto.equals(IpProtocol.UDP) || ipProto.equals(IpProtocol.ICMP)) {
+                    // fully specified; wildcards and all values are fine
+                    // normalize 32-63 ipv4 src 'mask' to a full bitmask
+                    if((wildcards & OFPFW_NW_SRC_ALL) != 0)
+                        wildcards |= OFPFW_NW_SRC_MASK;
+
+                    // normalize 32-63 ipv4 dst 'mask' to a full bitmask
+                    if((wildcards & OFPFW_NW_DST_ALL) != 0)
+                        wildcards |= OFPFW_NW_DST_MASK;
+
+                } else {
+                    // normalize 32-63 ipv4 src 'mask' to a full bitmask
+                    if((wildcards & OFPFW_NW_SRC_ALL) != 0)
+                        wildcards |= OFPFW_NW_SRC_MASK;
+
+                    // normalize 32-63 ipv4 dst 'mask' to a full bitmask
+                    if((wildcards & OFPFW_NW_DST_ALL) != 0)
+                        wildcards |= OFPFW_NW_DST_MASK;
+
+                    // not TCP/UDP/ICMP -> Clear TP wildcards for the wire
+                    wildcards &= ~(OFPFW_TP_SRC | OFPFW_TP_DST);
+                    tcpSrc = TransportPort.NONE;
+                    tcpDst = TransportPort.NONE;
+                }
+            } else if (ethType.equals(EthType.ARP)) {
+                // normalize 32-63 ipv4 src 'mask' to a full bitmask
+                if((wildcards & OFPFW_NW_SRC_ALL) != 0)
+                    wildcards |= OFPFW_NW_SRC_MASK;
+
+                // normalize 32-63 ipv4 dst 'mask' to a full bitmask
+                if((wildcards & OFPFW_NW_DST_ALL) != 0)
+                    wildcards |= OFPFW_NW_DST_MASK;
+
+                // ARP: clear NW_TOS / TP wildcards for the wire
+                wildcards &= ~( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST);
+                ipDscp = IpDscp.NONE;
+                tcpSrc = TransportPort.NONE;
+                tcpDst = TransportPort.NONE;
+            } else {
+                // not even IP. Clear NW/TP wildcards for the wire
+                wildcards &= ~( OFPFW_NW_TOS | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK | OFPFW_TP_SRC | OFPFW_TP_DST);
+                ipDscp = IpDscp.NONE;
+                ipProto = IpProtocol.NONE;
+                ipv4Src = IPv4Address.NONE;
+                ipv4Dst = IPv4Address.NONE;
+                tcpSrc = TransportPort.NONE;
+                tcpDst = TransportPort.NONE;
+            }
diff --git a/java_gen/templates/custom/OFMatchV1Ver10.Builder_normalize_set_wildcards_stanza.java b/java_gen/templates/custom/OFMatchV1Ver10.Builder_normalize_set_wildcards_stanza.java
new file mode 100644
index 0000000..09cb411
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV1Ver10.Builder_normalize_set_wildcards_stanza.java
@@ -0,0 +1,53 @@
+            // Normalize match fields according to current Open vSwitch behavior. When the
+            // prerequisites for a field are not met (e.g., eth_type is not set to 0x800), OVS sets
+            // the value of the corresponding ignored fields (e.g., ip_src, tcp_dst) to 0 and sets
+            // the wildcard bit to 1.
+            if(ethType.equals(EthType.IPv4)) {
+                // IP
+                if(ipProto.equals(IpProtocol.TCP) || ipProto.equals(IpProtocol.UDP) || ipProto.equals(IpProtocol.ICMP)) {
+                    // fully specified; wildcards and all values are fine
+                    // normalize 32-63 ipv4 src 'mask' to a full bitmask
+                    if((wildcards & OFPFW_NW_SRC_ALL) != 0)
+                        wildcards |= OFPFW_NW_SRC_MASK;
+
+                    // normalize 32-63 ipv4 dst 'mask' to a full bitmask
+                    if((wildcards & OFPFW_NW_DST_ALL) != 0)
+                        wildcards |= OFPFW_NW_DST_MASK;
+
+                } else {
+                    // normalize 32-63 ipv4 src 'mask' to a full bitmask
+                    if((wildcards & OFPFW_NW_SRC_ALL) != 0)
+                        wildcards |= OFPFW_NW_SRC_MASK;
+
+                    // normalize 32-63 ipv4 dst 'mask' to a full bitmask
+                    if((wildcards & OFPFW_NW_DST_ALL) != 0)
+                        wildcards |= OFPFW_NW_DST_MASK;
+
+                    // not TCP/UDP/ICMP -> set the TP wildcard bits and clear the port values for the wire
+                    wildcards |= (OFPFW_TP_SRC | OFPFW_TP_DST);
+                    tcpSrc = TransportPort.NONE;
+                    tcpDst = TransportPort.NONE;
+                }
+            } else if (ethType.equals(EthType.ARP)) {
+                // normalize 32-63 ipv4 src 'mask' to a full bitmask
+                if((wildcards & OFPFW_NW_SRC_ALL) != 0)
+                    wildcards |= OFPFW_NW_SRC_MASK;
+
+                // normalize 32-63 ipv4 dst 'mask' to a full bitmask
+                if((wildcards & OFPFW_NW_DST_ALL) != 0)
+                    wildcards |= OFPFW_NW_DST_MASK;
+
+                // ARP: set the NW_TOS / TP wildcard bits and clear the corresponding values for the wire
+                wildcards |= ( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST);
+                ipDscp = IpDscp.NONE;
+                tcpSrc = TransportPort.NONE;
+                tcpDst = TransportPort.NONE;
+            } else {
+                // not even IP: set the NW/TP wildcard bits and clear the corresponding values for the wire
+                wildcards |= ( OFPFW_NW_TOS | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK | OFPFW_TP_SRC | OFPFW_TP_DST);
+                ipDscp = IpDscp.NONE;
+                ipProto = IpProtocol.NONE;
+                ipv4Src = IPv4Address.NONE;
+                ipv4Dst = IPv4Address.NONE;
+                tcpSrc = TransportPort.NONE;
+                tcpDst = TransportPort.NONE;
+            }
diff --git a/java_gen/templates/custom/OFMatchV1Ver10.Builder_normalize_stanza.java b/java_gen/templates/custom/OFMatchV1Ver10.Builder_normalize_stanza.java
new file mode 100644
index 0000000..3050563
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV1Ver10.Builder_normalize_stanza.java
@@ -0,0 +1 @@
+//:: include("custom/%s.Builder_normalize_set_wildcards_stanza.java" % msg.name, msg=msg, has_parent=False)
diff --git a/java_gen/templates/custom/OFMatchV1Ver10.Reader_normalize_stanza.java b/java_gen/templates/custom/OFMatchV1Ver10.Reader_normalize_stanza.java
new file mode 100644
index 0000000..1de18df
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV1Ver10.Reader_normalize_stanza.java
@@ -0,0 +1 @@
+//:: include("custom/%s.Builder_normalize_stanza.java" % msg.name, msg=msg, has_parent=False)
diff --git a/java_gen/templates/custom/OFMatchV1Ver10.java b/java_gen/templates/custom/OFMatchV1Ver10.java
new file mode 100644
index 0000000..8a24b2f
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV1Ver10.java
@@ -0,0 +1,410 @@
+    final public static int OFPFW_ALL = ((1 << 22) - 1);
+
+    final public static int OFPFW_IN_PORT = 1 << 0; /* Switch input port. */
+    final public static int OFPFW_DL_VLAN = 1 << 1; /* VLAN id. */
+    final public static int OFPFW_DL_SRC = 1 << 2; /* Ethernet source address. */
+    final public static int OFPFW_DL_DST = 1 << 3; /*
+                                                    * Ethernet destination
+                                                    * address.
+                                                    */
+    final public static int OFPFW_DL_TYPE = 1 << 4; /* Ethernet frame type. */
+    final public static int OFPFW_NW_PROTO = 1 << 5; /* IP protocol. */
+    final public static int OFPFW_TP_SRC = 1 << 6; /* TCP/UDP source port. */
+    final public static int OFPFW_TP_DST = 1 << 7; /* TCP/UDP destination port. */
+
+    /*
+     * IP source address wildcard bit count. 0 is an exact match, 1 ignores the
+     * LSB, 2 ignores the 2 least-significant bits, ..., 32 and higher wildcard
+     * the entire field. Note that this is the *opposite* of the usual CIDR
+     * convention, where /24 means 24 significant bits and only the remaining
+     * 8 bits are wildcarded.
+     */
+    final public static int OFPFW_NW_SRC_SHIFT = 8;
+    final public static int OFPFW_NW_SRC_BITS = 6;
+    final public static int OFPFW_NW_SRC_MASK = ((1 << OFPFW_NW_SRC_BITS) - 1) << OFPFW_NW_SRC_SHIFT;
+    final public static int OFPFW_NW_SRC_ALL = 32 << OFPFW_NW_SRC_SHIFT;
+
+    /* IP destination address wildcard bit count. Same format as source. */
+    final public static int OFPFW_NW_DST_SHIFT = 14;
+    final public static int OFPFW_NW_DST_BITS = 6;
+    final public static int OFPFW_NW_DST_MASK = ((1 << OFPFW_NW_DST_BITS) - 1) << OFPFW_NW_DST_SHIFT;
+    final public static int OFPFW_NW_DST_ALL = 32 << OFPFW_NW_DST_SHIFT;
+
+    final public static int OFPFW_DL_VLAN_PCP = 1 << 20; /* VLAN priority. */
+    final public static int OFPFW_NW_TOS = 1 << 21; /* IP ToS (DSCP field, 6bits) */
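+
+    /*
+     * Worked example (illustrative only): a match on 10.0.0.0/24 as the IPv4 source
+     * leaves 8 low-order address bits wildcarded, so the NW_SRC field of 'wildcards'
+     * holds 8, i.e. (wildcards & OFPFW_NW_SRC_MASK) == (8 << OFPFW_NW_SRC_SHIFT), and
+     * getIpv4SrcCidrMaskLen() below returns 32 - 8 == 24. A value of 32 or more in
+     * that field (OFPFW_NW_SRC_ALL and above) wildcards the source address entirely.
+     */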
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public <F extends OFValueType<F>> F get(MatchField<F> field)
+            throws UnsupportedOperationException {
+        if (isFullyWildcarded(field))
+            return null;
+        if (!field.arePrerequisitesOK(this))
+            return null;
+
+        Object result;
+        switch (field.id) {
+            case IN_PORT:
+                result = inPort;
+                break;
+            case ETH_DST:
+                result = ethDst;
+                break;
+            case ETH_SRC:
+                result = ethSrc;
+                break;
+            case ETH_TYPE:
+                result = ethType;
+                break;
+            case VLAN_VID:
+                result = vlanVid;
+                break;
+            case VLAN_PCP:
+                result = vlanPcp;
+                break;
+            case ARP_OP:
+                result = ArpOpcode.of(ipProto.getIpProtocolNumber());
+                break;
+            case ARP_SPA:
+                result = ipv4Src;
+                break;
+            case ARP_TPA:
+                result = ipv4Dst;
+                break;
+            case IP_DSCP:
+                result = ipDscp;
+                break;
+            case IP_PROTO:
+                result = ipProto;
+                break;
+            case IPV4_SRC:
+                result = ipv4Src;
+                break;
+            case IPV4_DST:
+                result = ipv4Dst;
+                break;
+            case TCP_SRC:
+                result = tcpSrc;
+                break;
+            case TCP_DST:
+                result = tcpDst;
+                break;
+            case UDP_SRC:
+                result = tcpSrc;
+                break;
+            case UDP_DST:
+                result = tcpDst;
+                break;
+            case SCTP_SRC:
+                result = tcpSrc;
+                break;
+            case SCTP_DST:
+                result = tcpDst;
+                break;
+            case ICMPV4_TYPE:
+                result = tcpSrc;
+                break;
+            case ICMPV4_CODE:
+                result = tcpDst;
+                break;
+            // NOT SUPPORTED:
+            default:
+                throw new UnsupportedOperationException("OFMatch does not support matching on field " + field.getName());
+        }
+        return (F)result;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field)
+            throws UnsupportedOperationException {
+        if (!isPartiallyMasked(field))
+            return null;
+        if (!field.arePrerequisitesOK(this))
+            return null;
+        Object result;
+        switch (field.id) {
+            case ARP_SPA:
+            case IPV4_SRC:
+                int srcBitMask = (-1) << (32 - getIpv4SrcCidrMaskLen());
+                result = IPv4AddressWithMask.of(ipv4Src, IPv4Address.of(srcBitMask));
+                break;
+            case ARP_TPA:
+            case IPV4_DST:
+                int dstBitMask = (-1) << (32 - getIpv4DstCidrMaskLen());
+
+                result = IPv4AddressWithMask.of(ipv4Dst, IPv4Address.of(dstBitMask));
+                break;
+            default:
+                throw new UnsupportedOperationException("OFMatch does not support masked matching on field " + field.getName());
+        }
+        return (Masked<F>)result;
+    }
+
+    @Override
+    public boolean supports(MatchField<?> field) {
+        switch (field.id) {
+            case IN_PORT:
+            case ETH_DST:
+            case ETH_SRC:
+            case ETH_TYPE:
+            case VLAN_VID:
+            case VLAN_PCP:
+            case ARP_OP:
+            case ARP_SPA:
+            case ARP_TPA:
+            case IP_DSCP:
+            case IP_PROTO:
+            case IPV4_SRC:
+            case IPV4_DST:
+            case TCP_SRC:
+            case TCP_DST:
+            case UDP_SRC:
+            case UDP_DST:
+            case SCTP_SRC:
+            case SCTP_DST:
+            case ICMPV4_TYPE:
+            case ICMPV4_CODE:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    @Override
+    public boolean supportsMasked(MatchField<?> field) {
+        switch (field.id) {
+            case ARP_SPA:
+            case ARP_TPA:
+            case IPV4_SRC:
+            case IPV4_DST:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    @Override
+    public boolean isExact(MatchField<?> field) {
+        if (!field.arePrerequisitesOK(this))
+            return false;
+
+        switch (field.id) {
+            case IN_PORT:
+                return (this.wildcards & OFPFW_IN_PORT) == 0;
+            case ETH_DST:
+                return (this.wildcards & OFPFW_DL_DST) == 0;
+            case ETH_SRC:
+                return (this.wildcards & OFPFW_DL_SRC) == 0;
+            case ETH_TYPE:
+                return (this.wildcards & OFPFW_DL_TYPE) == 0;
+            case VLAN_VID:
+                return (this.wildcards & OFPFW_DL_VLAN) == 0;
+            case VLAN_PCP:
+                return (this.wildcards & OFPFW_DL_VLAN_PCP) == 0;
+            case ARP_OP:
+                return (this.wildcards & OFPFW_NW_PROTO) == 0;
+            case ARP_SPA:
+                return this.getIpv4SrcCidrMaskLen() >= 32;
+            case ARP_TPA:
+                return this.getIpv4DstCidrMaskLen() >= 32;
+            case IP_DSCP:
+                return (this.wildcards & OFPFW_NW_TOS) == 0;
+            case IP_PROTO:
+                return (this.wildcards & OFPFW_NW_PROTO) == 0;
+            case IPV4_SRC:
+                return this.getIpv4SrcCidrMaskLen() >= 32;
+            case IPV4_DST:
+                return this.getIpv4DstCidrMaskLen() >= 32;
+            case TCP_SRC:
+                return (this.wildcards & OFPFW_TP_SRC) == 0;
+            case TCP_DST:
+                return (this.wildcards & OFPFW_TP_DST) == 0;
+            case UDP_SRC:
+                return (this.wildcards & OFPFW_TP_SRC) == 0;
+            case UDP_DST:
+                return (this.wildcards & OFPFW_TP_DST) == 0;
+            case SCTP_SRC:
+                return (this.wildcards & OFPFW_TP_SRC) == 0;
+            case SCTP_DST:
+                return (this.wildcards & OFPFW_TP_DST) == 0;
+            case ICMPV4_TYPE:
+                return (this.wildcards & OFPFW_TP_SRC) == 0;
+            case ICMPV4_CODE:
+                return (this.wildcards & OFPFW_TP_DST) == 0;
+            default:
+                throw new UnsupportedOperationException("OFMatch does not support matching on field " + field.getName());
+        }
+    }
+
+    /**
+     * Parse this match's wildcard fields and return the number of significant
+     * bits in the IP destination field. NOTE: this returns the number of bits
+     * that are fixed, i.e., like CIDR, not the number of bits that are free
+     * like OpenFlow encodes.
+     *
+     * @return A number between 0 (matches all IPs) and 32 (exact match)
+     */
+    public int getIpv4DstCidrMaskLen() {
+        return Math.max(32 - ((wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT),
+                        0);
+    }
+
+    /**
+     * Parse this match's wildcard fields and return the number of significant
+     * bits in the IP source field. NOTE: this returns the number of bits
+     * that are fixed, i.e., like CIDR, not the number of bits that are free
+     * like OpenFlow encodes.
+     *
+     * @return A number between 0 (matches all IPs) and 32 (exact match)
+     */
+    public int getIpv4SrcCidrMaskLen() {
+        return Math.max(32 - ((wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT),
+                        0);
+    }
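+
+    // Note: values of 32-63 in the 6-bit wildcard field all mean "fully wildcarded";
+    // the Math.max(..., 0) above keeps the returned CIDR length at 0 for those values.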
+
+
+    @Override
+    public boolean isFullyWildcarded(MatchField<?> field) {
+        if (!field.arePrerequisitesOK(this))
+            return true;
+
+        switch (field.id) {
+            case IN_PORT:
+                return (this.wildcards & OFPFW_IN_PORT) != 0;
+            case ETH_DST:
+                return (this.wildcards & OFPFW_DL_DST) != 0;
+            case ETH_SRC:
+                return (this.wildcards & OFPFW_DL_SRC) != 0;
+            case ETH_TYPE:
+                return (this.wildcards & OFPFW_DL_TYPE) != 0;
+            case VLAN_VID:
+                return (this.wildcards & OFPFW_DL_VLAN) != 0;
+            case VLAN_PCP:
+                return (this.wildcards & OFPFW_DL_VLAN_PCP) != 0;
+            case ARP_OP:
+                return (this.wildcards & OFPFW_NW_PROTO) != 0;
+            case ARP_SPA:
+                return this.getIpv4SrcCidrMaskLen() <= 0;
+            case ARP_TPA:
+                return this.getIpv4DstCidrMaskLen() <= 0;
+            case IP_DSCP:
+                return (this.wildcards & OFPFW_NW_TOS) != 0;
+            case IP_PROTO:
+                return (this.wildcards & OFPFW_NW_PROTO) != 0;
+            case TCP_SRC:
+                return (this.wildcards & OFPFW_TP_SRC) != 0;
+            case TCP_DST:
+                return (this.wildcards & OFPFW_TP_DST) != 0;
+            case UDP_SRC:
+                return (this.wildcards & OFPFW_TP_SRC) != 0;
+            case UDP_DST:
+                return (this.wildcards & OFPFW_TP_DST) != 0;
+            case SCTP_SRC:
+                return (this.wildcards & OFPFW_TP_SRC) != 0;
+            case SCTP_DST:
+                return (this.wildcards & OFPFW_TP_DST) != 0;
+            case ICMPV4_TYPE:
+                return (this.wildcards & OFPFW_TP_SRC) != 0;
+            case ICMPV4_CODE:
+                return (this.wildcards & OFPFW_TP_DST) != 0;
+            case IPV4_SRC:
+                return this.getIpv4SrcCidrMaskLen() <= 0;
+            case IPV4_DST:
+                return this.getIpv4DstCidrMaskLen() <= 0;
+            default:
+                throw new UnsupportedOperationException("OFMatch does not support matching on field " + field.getName());
+        }
+    }
+
+    @Override
+    public boolean isPartiallyMasked(MatchField<?> field) {
+        if (!field.arePrerequisitesOK(this))
+            return false;
+
+        switch (field.id) {
+            case ARP_SPA:
+            case IPV4_SRC:
+                int srcCidrLen = getIpv4SrcCidrMaskLen();
+                return srcCidrLen > 0 && srcCidrLen < 32;
+            case ARP_TPA:
+            case IPV4_DST:
+                int dstCidrLen = getIpv4DstCidrMaskLen();
+                return dstCidrLen > 0 && dstCidrLen < 32;
+            default:
+                return false;
+        }
+    }
+
+    @Override
+    public Iterable<MatchField<?>> getMatchFields() {
+        ImmutableList.Builder<MatchField<?>> builder = ImmutableList.builder();
+        if ((wildcards & OFPFW_IN_PORT) == 0)
+            builder.add(MatchField.IN_PORT);
+        if ((wildcards & OFPFW_DL_VLAN) == 0)
+            builder.add(MatchField.VLAN_VID);
+        if ((wildcards & OFPFW_DL_SRC) == 0)
+            builder.add(MatchField.ETH_SRC);
+        if ((wildcards & OFPFW_DL_DST) == 0)
+            builder.add(MatchField.ETH_DST);
+        if ((wildcards & OFPFW_DL_TYPE) == 0)
+            builder.add(MatchField.ETH_TYPE);
+        if ((wildcards & OFPFW_NW_PROTO) == 0) {
+            if (ethType == EthType.ARP) {
+                builder.add(MatchField.ARP_OP);
+            } else if (ethType == EthType.IPv4) {
+                builder.add(MatchField.IP_PROTO);
+            } else {
+                throw new UnsupportedOperationException(
+                        "Unsupported Ethertype for matching on network protocol " + ethType);
+            }
+        }
+        if ((wildcards & OFPFW_TP_SRC) == 0) {
+            if (ipProto == IpProtocol.UDP) {
+                builder.add(MatchField.UDP_SRC);
+            } else if (ipProto == IpProtocol.TCP) {
+                builder.add(MatchField.TCP_SRC);
+            } else if (ipProto == IpProtocol.SCTP) {
+                builder.add(MatchField.SCTP_SRC);
+            } else {
+                throw new UnsupportedOperationException(
+                        "Unsupported IP protocol for matching on source port " + ipProto);
+            }
+        }
+        if ((wildcards & OFPFW_TP_DST) == 0) {
+            if (ipProto == IpProtocol.UDP) {
+                builder.add(MatchField.UDP_DST);
+            } else if (ipProto == IpProtocol.TCP) {
+                builder.add(MatchField.TCP_DST);
+            } else if (ipProto == IpProtocol.SCTP) {
+                builder.add(MatchField.SCTP_DST);
+            } else {
+                throw new UnsupportedOperationException(
+                        "Unsupported IP protocol for matching on destination port " + ipProto);
+            }
+        }
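+        // A value below 32 in the NW_SRC/NW_DST wildcard field means at least one
+        // address bit is still significant, so the field is reported as matched.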
+        if (((wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT) < 32) {
+            if (ethType == EthType.ARP) {
+                builder.add(MatchField.ARP_SPA);
+            } else if (ethType == EthType.IPv4) {
+                builder.add(MatchField.IPV4_SRC);
+            } else {
+                throw new UnsupportedOperationException(
+                        "Unsupported Ethertype for matching on source IP " + ethType);
+            }
+        }
+        if (((wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT) < 32) {
+            if (ethType == EthType.ARP) {
+                builder.add(MatchField.ARP_TPA);
+            } else if (ethType == EthType.IPv4) {
+                builder.add(MatchField.IPV4_DST);
+            } else {
+                throw new UnsupportedOperationException(
+                        "Unsupported Ethertype for matching on destination IP " + ethType);
+            }
+        }
+        if ((wildcards & OFPFW_DL_VLAN_PCP) == 0)
+            builder.add(MatchField.VLAN_PCP);
+        if ((wildcards & OFPFW_NW_TOS) == 0)
+            builder.add(MatchField.IP_DSCP);
+        return builder.build();
+    }
diff --git a/java_gen/templates/custom/OFMatchV1Ver10_toString.java b/java_gen/templates/custom/OFMatchV1Ver10_toString.java
new file mode 100644
index 0000000..3b2783b
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV1Ver10_toString.java
@@ -0,0 +1 @@
+//:: include("custom/OFMatch_toString.java", msg=msg, has_parent=False)
diff --git a/java_gen/templates/custom/OFMatchV2Ver11.Builder.java b/java_gen/templates/custom/OFMatchV2Ver11.Builder.java
new file mode 100644
index 0000000..6570df8
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV2Ver11.Builder.java
@@ -0,0 +1,71 @@
+
+    @Override
+    public <F extends OFValueType<F>> F get(MatchField<F> field)
+            throws UnsupportedOperationException {
+        // FIXME yotam - please replace with real implementation
+        return null;
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field)
+            throws UnsupportedOperationException {
+        // FIXME yotam - please replace with real implementation
+        return null;
+    }
+
+    @Override
+    public boolean supports(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public boolean supportsMasked(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public boolean isExact(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public boolean isFullyWildcarded(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public boolean isPartiallyMasked(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Match.Builder setExact(
+            MatchField<F> field, F value) {
+        // FIXME yotam - please replace with real implementation
+        return null;
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Match.Builder setMasked(
+            MatchField<F> field, F value, F mask) {
+        // FIXME yotam - please replace with real implementation
+        return null;
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Match.Builder setMasked(
+            MatchField<F> field, Masked<F> valueWithMask) {
+        // FIXME yotam - please replace with real implementation
+        return null;
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Match.Builder wildcard(MatchField<F> field) {
+        // FIXME yotam - please replace with real implementation
+        return null;
+    }
diff --git a/java_gen/templates/custom/OFMatchV2Ver11.java b/java_gen/templates/custom/OFMatchV2Ver11.java
new file mode 100644
index 0000000..ef79ffb
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV2Ver11.java
@@ -0,0 +1,49 @@
+
+    @Override
+    public <F extends OFValueType<F>> F get(MatchField<F> field)
+            throws UnsupportedOperationException {
+        // FIXME yotam - please replace with real implementation
+        return null;
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field)
+            throws UnsupportedOperationException {
+        // FIXME yotam - please replace with real implementation
+        return null;
+    }
+
+    @Override
+    public boolean supports(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public boolean supportsMasked(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public boolean isExact(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public boolean isFullyWildcarded(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public boolean isPartiallyMasked(MatchField<?> field) {
+        // FIXME yotam - please replace with real implementation
+        return false;
+    }
+
+    @Override
+    public Iterable<MatchField<?>> getMatchFields() {
+        throw new UnsupportedOperationException();
+    }
diff --git a/java_gen/templates/custom/OFMatchV3Ver12.Builder.java b/java_gen/templates/custom/OFMatchV3Ver12.Builder.java
new file mode 100644
index 0000000..3fae367
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV3Ver12.Builder.java
@@ -0,0 +1,106 @@
+
+    private OFOxmList.Builder oxmListBuilder;
+
+    private synchronized void initBuilder() {
+        if (oxmListBuilder != null)
+            return;
+        oxmListBuilder = new OFOxmList.Builder();
+    }
+
+    private synchronized void updateOxmList() {
+        this.oxmList = this.oxmListBuilder.build();
+        this.oxmListSet = true;
+    }
+
+    private <F extends OFValueType<F>> OFOxm<F> getOxm(MatchField<F> field) {
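+        // Prefer an OXM explicitly set on this builder; otherwise fall back to the
+        // parent message's OXM list (or null when this builder has no parent).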
+//:: if has_parent:
+        return this.oxmListSet ? this.oxmList.get(field) : parentMessage.oxmList.get(field);
+//:: else:
+        return this.oxmListSet ? this.oxmList.get(field) : null;
+//:: #endif
+    }
+
+    @Override
+    public synchronized <F extends OFValueType<F>> F get(MatchField<F> field)
+            throws UnsupportedOperationException {
+        OFOxm<F> value = getOxm(field);
+        if (value == null)
+            return null;
+        return value.getValue();
+    }
+
+    @Override
+    public synchronized <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field)
+            throws UnsupportedOperationException {
+        OFOxm<F> value = getOxm(field);
+        if (value == null || !value.isMasked())
+            return null;
+        // TODO: If changing OXMs to extend Masked, then use it here
+        return Masked.of(value.getValue(), value.getMask());
+    }
+
+    @Override
+    public boolean supports(MatchField<?> field) {
+        return supportsField(field);
+    }
+
+    @Override
+    public boolean supportsMasked(MatchField<?> field) {
+        return supportsField(field);
+    }
+
+    @Override
+    public synchronized boolean isExact(MatchField<?> field) {
+        OFOxm<?> value = getOxm(field);
+        return (value != null && !value.isMasked());
+    }
+
+    @Override
+    public synchronized boolean isFullyWildcarded(MatchField<?> field) {
+        OFOxm<?> value = getOxm(field);
+        return (value == null);
+    }
+
+    @Override
+    public synchronized boolean isPartiallyMasked(MatchField<?> field) {
+        OFOxm<?> value = getOxm(field);
+        return (value != null && value.isMasked());
+    }
+
+    @Override
+    public synchronized <F extends OFValueType<F>> Match.Builder setExact(
+            MatchField<F> field, F value) {
+        initBuilder();
+        OFOxm<F> oxm = OFFactories.getFactory(OFVersion.OF_13).oxms().fromValue(value, field);
+        this.oxmListBuilder.set(oxm);
+        updateOxmList();
+        return this;
+    }
+
+    @Override
+    public synchronized <F extends OFValueType<F>> Match.Builder setMasked(
+            MatchField<F> field, F value, F mask) {
+        initBuilder();
+        OFOxm<F> oxm = OFFactories.getFactory(OFVersion.OF_13).oxms().fromValueAndMask(value, mask, field);
+        this.oxmListBuilder.set(oxm);
+        updateOxmList();
+        return this;
+    }
+
+    @Override
+    public synchronized <F extends OFValueType<F>> Match.Builder setMasked(
+            MatchField<F> field, Masked<F> valueWithMask) {
+        initBuilder();
+        OFOxm<F> oxm = OFFactories.getFactory(OFVersion.OF_13).oxms().fromMasked(valueWithMask, field);
+        this.oxmListBuilder.set(oxm);
+        updateOxmList();
+        return this;
+    }
+
+    @Override
+    public synchronized <F extends OFValueType<F>> Match.Builder wildcard(MatchField<F> field) {
+        initBuilder();
+        this.oxmListBuilder.unset(field);
+        updateOxmList();
+        return this;
+    }
diff --git a/java_gen/templates/custom/OFMatchV3Ver12.java b/java_gen/templates/custom/OFMatchV3Ver12.java
new file mode 100644
index 0000000..81092c1
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV3Ver12.java
@@ -0,0 +1,114 @@
+
+    @Override
+    public <F extends OFValueType<F>> F get(MatchField<F> field)
+            throws UnsupportedOperationException {
+        if (!supports(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support matching on field " + field.getName());
+
+        OFOxm<F> oxm = this.oxmList.get(field);
+
+        if (oxm == null || !field.arePrerequisitesOK(this))
+            return null;
+
+        return oxm.getValue();
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field)
+            throws UnsupportedOperationException {
+        if (!supportsMasked(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support masked matching on field " + field.getName());
+
+        OFOxm<F> oxm = this.oxmList.get(field);
+
+        if (oxm == null || !field.arePrerequisitesOK(this))
+            return null;
+
+        if (oxm.getMask() == null)
+            return null;
+
+        // TODO: Make OfOxm extend Masked and just return the OXM?
+        return Masked.of(oxm.getValue(), oxm.getMask());
+    }
+
+    private static boolean supportsField(MatchField<?> field) {
+        switch (field.id) {
+            case IN_PORT:
+            case IN_PHY_PORT:
+            case METADATA:
+            case ETH_DST:
+            case ETH_SRC:
+            case ETH_TYPE:
+            case VLAN_VID:
+            case VLAN_PCP:
+            case IP_DSCP:
+            case IP_ECN:
+            case IP_PROTO:
+            case IPV4_SRC:
+            case IPV4_DST:
+            case TCP_SRC:
+            case TCP_DST:
+            case UDP_SRC:
+            case UDP_DST:
+            case SCTP_SRC:
+            case SCTP_DST:
+            case ICMPV4_TYPE:
+            case ICMPV4_CODE:
+            case ARP_OP:
+            case ARP_SPA:
+            case ARP_TPA:
+            case ARP_SHA:
+            case ARP_THA:
+            case IPV6_SRC:
+            case IPV6_DST:
+            case IPV6_FLABEL:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    @Override
+    public boolean supports(MatchField<?> field) {
+        return supportsField(field);
+    }
+
+    @Override
+    public boolean supportsMasked(MatchField<?> field) {
+        return supportsField(field);
+    }
+
+    @Override
+    public boolean isExact(MatchField<?> field) {
+        if (!supports(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support matching on field " + field.getName());
+
+        OFOxm<?> oxm = this.oxmList.get(field);
+
+        return oxm != null && !oxm.isMasked();
+    }
+
+    @Override
+    public boolean isFullyWildcarded(MatchField<?> field) {
+        if (!supports(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support matching on field " + field.getName());
+
+        OFOxm<?> oxm = this.oxmList.get(field);
+
+        return oxm == null;
+    }
+
+    @Override
+    public boolean isPartiallyMasked(MatchField<?> field) {
+        if (!supports(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support matching on field " + field.getName());
+
+        OFOxm<?> oxm = this.oxmList.get(field);
+
+        return oxm != null && oxm.isMasked();
+    }
+
+    @Override
+    public Iterable<MatchField<?>> getMatchFields() {
+        throw new UnsupportedOperationException();
+    }
diff --git a/java_gen/templates/custom/OFMatchV3Ver13.Builder.java b/java_gen/templates/custom/OFMatchV3Ver13.Builder.java
new file mode 100644
index 0000000..79cbdbc
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV3Ver13.Builder.java
@@ -0,0 +1,107 @@
+
+    private OFOxmList.Builder oxmListBuilder;
+
+    private void initBuilder() {
+        if (oxmListBuilder != null)
+            return;
+        oxmListBuilder = new OFOxmList.Builder();
+    }
+
+    private void updateOxmList() {
+        this.oxmList = this.oxmListBuilder.build();
+        this.oxmListSet = true;
+    }
+
+    private <F extends OFValueType<F>> OFOxm<F> getOxm(MatchField<F> field) {
+//:: if has_parent:
+        return this.oxmListSet ? this.oxmList.get(field) : parentMessage.oxmList.get(field);
+//:: else:
+        return this.oxmListSet ? this.oxmList.get(field) : null;
+//:: #endif
+    }
+
+    @Override
+    public <F extends OFValueType<F>> F get(MatchField<F> field)
+            throws UnsupportedOperationException {
+        OFOxm<F> value = getOxm(field);
+        if (value == null)
+            return null;
+        return value.getValue();
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field)
+            throws UnsupportedOperationException {
+        OFOxm<F> value = getOxm(field);
+        if (value == null || !value.isMasked())
+            return null;
+        // TODO: If changing OXMs to extend Masked, then use it here
+        return Masked.of(value.getValue(), value.getMask());
+    }
+
+    @Override
+    public boolean supports(MatchField<?> field) {
+        return supportsField(field);
+    }
+
+    @Override
+    public boolean supportsMasked(MatchField<?> field) {
+        return supportsField(field);
+    }
+
+    @Override
+    public boolean isExact(MatchField<?> field) {
+        OFOxm<?> value = getOxm(field);
+        return (value != null && !value.isMasked());
+    }
+
+    @Override
+    public boolean isFullyWildcarded(MatchField<?> field) {
+        OFOxm<?> value = getOxm(field);
+        return (value == null);
+    }
+
+    @Override
+    public boolean isPartiallyMasked(MatchField<?> field) {
+        OFOxm<?> value = getOxm(field);
+        return (value != null && value.isMasked());
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Match.Builder setExact(
+            MatchField<F> field, F value) {
+        initBuilder();
+        OFOxm<F> oxm = OFFactories.getFactory(OFVersion.OF_13).oxms().fromValue(value, field);
+        this.oxmListBuilder.set(oxm);
+        updateOxmList();
+        return this;
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Match.Builder setMasked(
+            MatchField<F> field, F value, F mask) {
+        initBuilder();
+        OFOxm<F> oxm = OFFactories.getFactory(OFVersion.OF_13).oxms().fromValueAndMask(value, mask, field);
+        this.oxmListBuilder.set(oxm);
+        updateOxmList();
+        return this;
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Match.Builder setMasked(
+            MatchField<F> field, Masked<F> valueWithMask) {
+        initBuilder();
+        OFOxm<F> oxm = OFFactories.getFactory(OFVersion.OF_13).oxms().fromMasked(valueWithMask, field);
+        this.oxmListBuilder.set(oxm);
+        updateOxmList();
+        return this;
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Match.Builder wildcard(MatchField<F> field) {
+        initBuilder();
+        this.oxmListBuilder.unset(field);
+        updateOxmList();
+        return this;
+    }
+
diff --git a/java_gen/templates/custom/OFMatchV3Ver13.java b/java_gen/templates/custom/OFMatchV3Ver13.java
new file mode 100644
index 0000000..24cab5b
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV3Ver13.java
@@ -0,0 +1,112 @@
+//:: from generic_utils import OrderedSet
+//:: from java_gen.java_model import model
+    @Override
+    public <F extends OFValueType<F>> F get(MatchField<F> field)
+            throws UnsupportedOperationException {
+        if (!supports(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support matching on field " + field.getName());
+
+        OFOxm<F> oxm = this.oxmList.get(field);
+
+        if (oxm == null || !field.arePrerequisitesOK(this))
+            return null;
+
+        return oxm.getValue();
+    }
+
+    @Override
+    public <F extends OFValueType<F>> Masked<F> getMasked(MatchField<F> field)
+            throws UnsupportedOperationException {
+        if (!supportsMasked(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support masked matching on field " + field.getName());
+
+        OFOxm<F> oxm = this.oxmList.get(field);
+
+        if (oxm == null || !field.arePrerequisitesOK(this))
+            return null;
+
+        if (oxm.getMask() == null)
+            return null;
+
+        // TODO: Make OfOxm extend Masked and just return the OXM?
+        return Masked.of(oxm.getValue(), oxm.getMask());
+    }
+
+    private static boolean supportsField(MatchField<?> field) {
+        switch (field.id) {
+            //:: for id_constant in sorted(set(id_constant for _, id_constant, _ in model.oxm_map.values())):
+            case ${id_constant}:
+            //:: #endfor
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    @Override
+    public boolean supports(MatchField<?> field) {
+        return supportsField(field);
+    }
+
+    @Override
+    public boolean supportsMasked(MatchField<?> field) {
+        return supportsField(field);
+    }
+
+    @Override
+    public boolean isExact(MatchField<?> field) {
+        if (!supports(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support matching on field " + field.getName());
+
+        OFOxm<?> oxm = this.oxmList.get(field);
+
+        return oxm != null && !oxm.isMasked();
+    }
+
+    @Override
+    public boolean isFullyWildcarded(MatchField<?> field) {
+        if (!supports(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support matching on field " + field.getName());
+
+        OFOxm<?> oxm = this.oxmList.get(field);
+
+        return oxm == null;
+    }
+
+    @Override
+    public boolean isPartiallyMasked(MatchField<?> field) {
+        if (!supports(field))
+            throw new UnsupportedOperationException("OFMatchV3Ver13 does not support matching on field " + field.getName());
+
+        OFOxm<?> oxm = this.oxmList.get(field);
+
+        return oxm != null && oxm.isMasked();
+    }
+
+    private class MatchFieldIterator extends UnmodifiableIterator<MatchField<?>> {
+        private Iterator<OFOxm<?>> oxmIterator;
+
+        MatchFieldIterator() {
+            oxmIterator = oxmList.iterator();
+        }
+
+        @Override
+        public boolean hasNext() {
+            return oxmIterator.hasNext();
+        }
+
+        @Override
+        public MatchField<?> next() {
+            OFOxm<?> next = oxmIterator.next();
+            return next.getMatchField();
+        }
+    }
+
+    @Override
+    public Iterable<MatchField<?>> getMatchFields() {
+        return new Iterable<MatchField<?>>() {
+            public Iterator<MatchField<?>> iterator() {
+                return new MatchFieldIterator();
+            }
+        };
+    }
diff --git a/java_gen/templates/custom/OFMatchV3Ver13_toString.java b/java_gen/templates/custom/OFMatchV3Ver13_toString.java
new file mode 100644
index 0000000..3b2783b
--- /dev/null
+++ b/java_gen/templates/custom/OFMatchV3Ver13_toString.java
@@ -0,0 +1 @@
+//:: include("custom/OFMatch_toString.java", msg=msg, has_parent=False)
diff --git a/java_gen/templates/custom/OFMatch_toString.java b/java_gen/templates/custom/OFMatch_toString.java
new file mode 100644
index 0000000..1eaf8a5
--- /dev/null
+++ b/java_gen/templates/custom/OFMatch_toString.java
@@ -0,0 +1,18 @@
+    @Override
+    public String toString() {
+        StringBuilder b = new StringBuilder("${msg.name}(");
+        boolean first = true;
+        for(MatchField<?> field : getMatchFields()) {
+            if(first)
+                first = false;
+            else
+                b.append(", ");
+            String name = field.getName();
+            b.append(name).append('=').append(this.get(field));
+            if(isPartiallyMasked(field)) {
+                b.append('/').append(this.getMasked(field).getMask());
+            }
+        }
+        b.append(")");
+        return b.toString();
+    }
diff --git a/java_gen/templates/custom/OFOxm_getCanonical.java b/java_gen/templates/custom/OFOxm_getCanonical.java
new file mode 100644
index 0000000..6681870
--- /dev/null
+++ b/java_gen/templates/custom/OFOxm_getCanonical.java
@@ -0,0 +1,17 @@
+//:: import re
+    public ${prop.java_type.public_type} getCanonical() {
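+        // Canonicalization as implemented below (assuming NO_MASK is the all-ones mask
+        // and FULL_MASK the all-zero mask): an exact-match OXM is canonical as-is; a
+        // masked OXM whose mask is NO_MASK collapses to the unmasked OXM; FULL_MASK
+        // (field fully wildcarded) canonicalizes to null; any other mask is kept.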
+        //:: if not msg.member_by_name("masked").value == "true":
+        // exact match OXM is always canonical
+        return this;
+        //:: else:
+        //:: mask_type = msg.member_by_name("mask").java_type.public_type
+        if (${mask_type}.NO_MASK.equals(mask)) {
+            //:: unmasked = re.sub(r'(.*)Masked(Ver.*)', r'\1\2', msg.name)
+            return new ${unmasked}(value);
+        } else if(${mask_type}.FULL_MASK.equals(mask)) {
+            return null;
+        } else {
+            return this;
+        }
+        //:: #endif
+    }
diff --git a/java_gen/templates/of_class.java b/java_gen/templates/of_class.java
new file mode 100644
index 0000000..f1d72b2
--- /dev/null
+++ b/java_gen/templates/of_class.java
@@ -0,0 +1,436 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: from loxi_ir import *
+//:: import os
+//:: import itertools
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package ${msg.package};
+
+//:: include("_imports.java", msg=msg)
+
+class ${impl_class} implements ${msg.interface.inherited_declaration()} {
+//:: if genopts.instrument:
+    private static final Logger logger = LoggerFactory.getLogger(${impl_class}.class);
+//:: #endif
+    // version: ${version}
+    final static byte WIRE_VERSION = ${version.int_version};
+//:: if msg.is_fixed_length:
+    final static int LENGTH = ${msg.length};
+//:: else:
+    final static int MINIMUM_LENGTH = ${msg.min_length};
+//:: #endif
+
+//:: for prop in msg.data_members:
+    //:: if prop.java_type.public_type != msg.interface.member_by_name(prop.name).java_type.public_type:
+    //::    raise Exception("Interface and Class types do not match up: C: {} <-> I: {}".format(prop.java_type.public_type, msg.interface.member_by_name(prop.name).java_type.public_type))
+    //:: #endif
+    //:: if prop.default_value:
+        private final static ${prop.java_type.public_type} ${prop.default_name} = ${prop.default_value};
+    //:: #endif
+//:: #end
+
+    // OF message fields
+//:: for prop in msg.data_members:
+    private final ${prop.java_type.public_type} ${prop.name};
+//:: #endfor
+//
+//:: if all(prop.default_value for prop in msg.data_members):
+    // Immutable default instance
+    final static ${impl_class} DEFAULT = new ${impl_class}(
+        ${", ".join(prop.default_name for prop in msg.data_members)}
+    );
+//:: #endif
+
+    //:: if msg.data_members:
+    // package private constructor - used by readers, builders, and factory
+    ${impl_class}(${
+        ", ".join("%s %s" %(prop.java_type.public_type, prop.name) for prop in msg.data_members) }) {
+//:: for prop in msg.data_members:
+        this.${prop.name} = ${prop.name};
+//:: #endfor
+    }
+    //:: else:
+    final static ${impl_class} INSTANCE = new ${impl_class}();
+    // private empty constructor - use shared instance!
+    private ${impl_class}() {
+    }
+    //:: #endif
+
+    // Accessors for OF message fields
+    //:: include("_field_accessors.java", msg=msg, generate_setters=False, builder=False, has_parent=False)
+
+    //:: if os.path.exists("%s/custom/%s.java" % (template_dir, msg.name)):
+    //:: include("custom/%s.java" % msg.name, msg=msg)
+    //:: #endif
+
+    //:: if msg.data_members:
+    public ${msg.interface.name}.Builder createBuilder() {
+        return new BuilderWithParent(this);
+    }
+
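+    // BuilderWithParent: a builder seeded from an existing message; in build(), any
+    // property that was not explicitly set falls back to the parent message's value.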
+    static class BuilderWithParent implements ${msg.interface.name}.Builder {
+        final ${impl_class} parentMessage;
+
+        // OF message fields
+//:: for prop in msg.data_members:
+        private boolean ${prop.name}Set;
+        private ${prop.java_type.public_type} ${prop.name};
+//:: #endfor
+
+        BuilderWithParent(${impl_class} parentMessage) {
+            this.parentMessage = parentMessage;
+        }
+
+//:: include("_field_accessors.java", msg=msg, generate_setters=True, builder=True, has_parent=True)
+
+
+        @Override
+        public ${msg.interface.name} build() {
+                //:: for prop in msg.data_members:
+                ${prop.java_type.public_type} ${prop.name} = this.${prop.name}Set ? this.${prop.name} : parentMessage.${prop.name};
+                //::    if not prop.is_nullable and not prop.java_type.is_primitive:
+                if(${prop.name} == null)
+                    throw new NullPointerException("Property ${prop.name} must not be null");
+                //::    #endif
+                //:: #endfor
+
+                //
+                //:: if os.path.exists("%s/custom/%s.Builder_normalize_stanza.java" % (template_dir, msg.name)):
+                //:: include("custom/%s.Builder_normalize_stanza.java" % msg.name, msg=msg, has_parent=False)
+                //:: #endif
+                return new ${impl_class}(
+                //:: for i, prop in enumerate(msg.data_members):
+                //::    comma = "," if i < len(msg.data_members)-1 else ""
+                    ${prop.name}${comma}
+                //:: #endfor
+                );
+        }
+        //:: if os.path.exists("%s/custom/%s.Builder.java" % (template_dir, msg.name)):
+        //:: include("custom/%s.Builder.java" % msg.name, msg=msg, has_parent=True)
+        //:: #endif
+
+    }
+
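+    // Stand-alone builder: in build(), unset properties fall back to their declared
+    // defaults; properties without a default must be set explicitly.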
+    static class Builder implements ${msg.interface.name}.Builder {
+        // OF message fields
+//:: for prop in msg.data_members:
+        private boolean ${prop.name}Set;
+        private ${prop.java_type.public_type} ${prop.name};
+//:: #endfor
+
+//:: include("_field_accessors.java", msg=msg, generate_setters=True, builder=True, has_parent=False)
+//
+        @Override
+        public ${msg.interface.name} build() {
+            //:: for prop in msg.data_members:
+            //::    if prop.default_value:
+            ${prop.java_type.public_type} ${prop.name} = this.${prop.name}Set ? this.${prop.name} : ${prop.default_name};
+            //:: else:
+            if(!this.${prop.name}Set)
+                throw new IllegalStateException("Property ${prop.name} doesn't have a default value -- must be set");
+            //::    #endif
+            //::    if not prop.is_nullable and not prop.java_type.is_primitive:
+            if(${prop.name} == null)
+                throw new NullPointerException("Property ${prop.name} must not be null");
+            //::    #endif
+            //:: #endfor
+
+            //:: if os.path.exists("%s/custom/%s.Builder_normalize_stanza.java" % (template_dir, msg.name)):
+            //:: include("custom/%s.Builder_normalize_stanza.java" % msg.name, msg=msg, has_parent=False)
+            //:: #endif
+
+            return new ${impl_class}(
+                //:: for i, prop in enumerate(msg.data_members):
+                //::    comma = "," if i < len(msg.data_members)-1 else ""
+                    ${prop.name}${comma}
+                //:: #endfor
+                );
+        }
+        //:: if os.path.exists("%s/custom/%s.Builder.java" % (template_dir, msg.name)):
+        //:: include("custom/%s.Builder.java" % msg.name, msg=msg, has_parent=False)
+        //:: #endif
+
+    }
+    //:: else:
+    // no data members - do not support builder
+    public ${msg.interface.name}.Builder createBuilder() {
+        throw new UnsupportedOperationException("${impl_class} has no mutable properties -- builder unneeded");
+    }
+    //:: #endif
+
+
+    final static Reader READER = new Reader();
+    static class Reader implements OFMessageReader<${msg.interface.name}> {
+        @Override
+        public ${msg.interface.name} readFrom(ChannelBuffer bb) throws OFParseError {
+//:: for prop in msg.members:
+//:: if not prop.is_virtual and (prop.is_length_value or prop.is_field_length_value):
+            int start = bb.readerIndex();
+//::     break
+//:: #endif
+//:: #endfor
+//:: fields_with_length_member = {}
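+//:: # maps a field name to the local variable that holds its previously read length,
+//:: # so the field's read_op below consumes exactly that many bytes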
+//:: for prop in msg.members:
+//:: if prop.is_virtual:
+//::    continue
+//:: elif prop.is_data:
+            ${prop.java_type.public_type} ${prop.name} = ${prop.java_type.read_op(version, pub_type=True,
+                    length=fields_with_length_member[prop.c_name] if prop.c_name in fields_with_length_member else None)};
+//:: elif prop.is_pad:
+            // pad: ${prop.length} bytes
+            bb.skipBytes(${prop.length});
+//:: elif prop.is_length_value:
+            ${prop.java_type.public_type} ${prop.name} = ${prop.java_type.read_op(version, pub_type=True)};
+            //:: if prop.is_fixed_value:
+            if(${prop.name} != ${prop.value})
+                throw new OFParseError("Wrong ${prop.name}: Expected=${prop.enum_value}(${prop.value}), got="+${prop.name});
+            //:: else:
+            if(${prop.name} < MINIMUM_LENGTH)
+                throw new OFParseError("Wrong ${prop.name}: Expected to be >= " + MINIMUM_LENGTH + ", was: " + ${prop.name});
+            //:: #endif
+            if(bb.readableBytes() + (bb.readerIndex() - start) < ${prop.name}) {
+                // Buffer does not have all data yet
+                bb.readerIndex(start);
+                return null;
+            }
+            //:: if genopts.instrument:
+            if(logger.isTraceEnabled())
+                logger.trace("readFrom - length={}", ${prop.name});
+            //:: #endif
+//:: elif prop.is_fixed_value:
+            // fixed value property ${prop.name} == ${prop.value}
+            ${prop.java_type.priv_type} ${prop.name} = ${prop.java_type.read_op(version, pub_type=False)};
+            if(${prop.name} != ${prop.priv_value})
+                throw new OFParseError("Wrong ${prop.name}: Expected=${prop.enum_value}(${prop.value}), got="+${prop.name});
+//:: elif prop.is_field_length_value:
+//::        fields_with_length_member[prop.member.field_name] = prop.name
+            ${prop.java_type.public_type} ${prop.name} = ${prop.java_type.read_op(version, pub_type=True)};
+//:: else:
+            // FIXME: todo ${prop.name}
+//:: #endif
+//:: #endfor
+            //:: if msg.align:
+            //:: if msg.length_includes_align:
+            // align message to ${msg.align} bytes (length contains aligned value)
+            bb.skipBytes(length - (bb.readerIndex() - start));
+            //:: else:
+            // align message to ${msg.align} bytes (length does not contain alignment)
+            bb.skipBytes(((length + ${msg.align-1})/${msg.align} * ${msg.align} ) - length );
+            //:: #endif
+            //:: #endif
+
+            //:: if msg.data_members:
+            //:: if os.path.exists("%s/custom/%s.Reader_normalize_stanza.java" % (template_dir, msg.name)):
+            //:: include("custom/%s.Reader_normalize_stanza.java" % msg.name, msg=msg, has_parent=False)
+            //:: #endif
+            ${impl_class} ${msg.variable_name} = new ${impl_class}(
+                    ${",\n                      ".join(
+                         [ prop.name for prop in msg.data_members])}
+                    );
+            //:: if genopts.instrument:
+            if(logger.isTraceEnabled())
+                logger.trace("readFrom - read={}", ${msg.variable_name});
+            //:: #endif
+            return ${msg.variable_name};
+            //:: else:
+            //:: if genopts.instrument:
+            if(logger.isTraceEnabled())
+                logger.trace("readFrom - returning shared instance={}", INSTANCE);
+            //:: #endif
+            return INSTANCE;
+            //:: #endif
+        }
+    }
+
+    public void putTo(PrimitiveSink sink) {
+        FUNNEL.funnel(this, sink);
+    }
+
+    final static ${impl_class}Funnel FUNNEL = new ${impl_class}Funnel();
+    static class ${impl_class}Funnel implements Funnel<${impl_class}> {
+        private static final long serialVersionUID = 1L;
+        @Override
+        public void funnel(${impl_class} message, PrimitiveSink sink) {
+//:: for prop in msg.members:
+//:: if prop.is_virtual:
+//::    continue
+//:: elif prop.is_data:
+            ${prop.java_type.funnel_op(version, "message." + prop.name, pub_type=True)};
+//:: elif prop.is_pad:
+            // skip pad (${prop.length} bytes)
+//:: elif prop.is_fixed_value:
+            // fixed value property ${prop.name} = ${prop.value}
+            ${prop.java_type.funnel_op(version, prop.priv_value, pub_type=False)};
+//:: else:
+            // FIXME: skip funnel of ${prop.name}
+//:: #endif
+//:: #endfor
+        }
+    }
+
+
+    public void writeTo(ChannelBuffer bb) {
+        WRITER.write(bb, this);
+    }
+
+    final static Writer WRITER = new Writer();
+    static class Writer implements OFMessageWriter<${impl_class}> {
+        @Override
+        public void write(ChannelBuffer bb, ${impl_class} message) {
+//:: if not msg.is_fixed_length:
+            int startIndex = bb.writerIndex();
+//:: #endif
+//:: fields_with_length_member = {}
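+//:: # maps a field name to its length member, whose placeholder is back-patched
+//:: # once the field itself has been written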
+//:: for prop in msg.members:
+//:: if prop.c_name in fields_with_length_member:
+            int ${prop.name}StartIndex = bb.writerIndex();
+//:: #endif
+//:: if prop.is_virtual:
+//::    continue
+//:: elif prop.is_data:
+            ${prop.java_type.write_op(version, "message." + prop.name, pub_type=True)};
+//:: elif prop.is_pad:
+            // pad: ${prop.length} bytes
+            bb.writeZero(${prop.length});
+//:: elif prop.is_fixed_value:
+            // fixed value property ${prop.name} = ${prop.value}
+            ${prop.java_type.write_op(version, prop.priv_value, pub_type=False)};
+//:: elif prop.is_length_value:
+            // ${prop.name} is the length of the variable-length message; it will be updated at the end
+//:: if not msg.is_fixed_length:
+            int lengthIndex = bb.writerIndex();
+//:: #end
+            ${prop.java_type.write_op(version, 0)};
+
+//:: elif prop.is_field_length_value:
+//::        fields_with_length_member[prop.member.field_name] = prop.name
+            // ${prop.name} is the length indicator for ${prop.member.field_name}; it will be
+            // updated once ${prop.member.field_name} has been written
+            int ${prop.name}Index = bb.writerIndex();
+            ${prop.java_type.write_op(version, 0, pub_type=False)};
+//:: else:
+            // FIXME: todo write ${prop.name}
+//:: #endif
+//:: if prop.c_name in fields_with_length_member:
+//::     length_member_name = fields_with_length_member[prop.c_name]
+            // update field length member ${length_member_name}
+            int ${prop.name}Length = bb.writerIndex() - ${prop.name}StartIndex;
+            bb.setShort(${length_member_name}Index, ${prop.name}Length);
+//:: #endif
+//:: #endfor
+
+//:: if not msg.is_fixed_length:
+            // update length field
+            int length = bb.writerIndex() - startIndex;
+            //:: if msg.align:
+            int alignedLength = ((length + ${msg.align-1})/${msg.align} * ${msg.align});
+            //:: #endif
+            bb.setShort(lengthIndex, ${"alignedLength" if msg.length_includes_align else "length"});
+            //:: if msg.align:
+            // align message to ${msg.align} bytes
+            bb.writeZero(alignedLength - length);
+            //:: #endif
+//:: #end
+
+        }
+    }
+
+    //:: if os.path.exists("%s/custom/%s_toString.java" % (template_dir, msg.name)):
+    //:: include("custom/%s_toString.java" % msg.name, msg=msg, has_parent=False)
+    //:: else:
+    @Override
+    public String toString() {
+        StringBuilder b = new StringBuilder("${msg.name}(");
+        //:: for i, prop in enumerate(msg.data_members):
+        //:: if i > 0:
+        b.append(", ");
+        //:: #endif
+        b.append("${prop.name}=").append(${ "Arrays.toString(%s)" % prop.name if prop.java_type.is_array else prop.name });
+        //:: #endfor
+        b.append(")");
+        return b.toString();
+    }
+    //:: #endif
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        //:: if len(msg.data_members) > 0:
+        ${msg.name} other = (${msg.name}) obj;
+        //:: #endif
+
+        //:: for prop in msg.data_members:
+        //:: if prop.java_type.is_primitive:
+        if( ${prop.name} != other.${prop.name})
+            return false;
+        //:: elif prop.java_type.is_array:
+        if (!Arrays.equals(${prop.name}, other.${prop.name}))
+                return false;
+        //:: else:
+        if (${prop.name} == null) {
+            if (other.${prop.name} != null)
+                return false;
+        } else if (!${prop.name}.equals(other.${prop.name}))
+            return false;
+        //:: #endif
+        //:: #endfor
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        //:: if len(msg.data_members) > 0:
+        final int prime = 31;
+        //:: #endif
+        int result = 1;
+
+        //:: for prop in msg.data_members:
+        //:: if prop.java_type.pub_type == 'long':
+        result = prime * result + (int) (${prop.name} ^ (${prop.name} >>> 32));
+        //:: elif prop.java_type.pub_type == 'boolean':
+        result = prime * result + (${prop.name} ? 1231 : 1237);
+        //:: elif prop.java_type.is_primitive:
+        result = prime * result + ${prop.name};
+        //:: elif prop.java_type.is_array:
+        result = prime * result + Arrays.hashCode(${prop.name});
+        //:: else:
+        result = prime * result + ((${prop.name} == null) ? 0 : ${prop.name}.hashCode());
+        //:: #endif
+        //:: #endfor
+        return result;
+    }
+
+}
diff --git a/java_gen/templates/of_factories.java b/java_gen/templates/of_factories.java
new file mode 100644
index 0000000..f9ec015
--- /dev/null
+++ b/java_gen/templates/of_factories.java
@@ -0,0 +1,72 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: import itertools
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package org.projectfloodlight.openflow.protocol;
+
+//:: include("_imports.java")
+
+public final class OFFactories {
+
+    private static final GenericReader GENERIC_READER = new GenericReader();
+
+    public static OFFactory getFactory(OFVersion version) {
+        switch(version) {
+            //:: for v in versions:
+            case ${v.constant_version}:
+                return org.projectfloodlight.openflow.protocol.ver${v.dotless_version}.OFFactoryVer${v.dotless_version}.INSTANCE;
+            //:: #endfor
+            default:
+                throw new IllegalArgumentException("Unknown version: "+version);
+            }
+    }
+
+    private static class GenericReader implements OFMessageReader<OFMessage> {
+        public OFMessage readFrom(ChannelBuffer bb) throws OFParseError {
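+            // peek at the version byte at the start of the buffer (without advancing
+            // the reader index) to select the matching per-version factory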
+            short wireVersion = U8.f(bb.getByte(0));
+            OFFactory factory;
+            switch (wireVersion) {
+            //:: for v in versions:
+            case ${v.int_version}:
+                factory = org.projectfloodlight.openflow.protocol.ver${v.dotless_version}.OFFactoryVer${v.dotless_version}.INSTANCE;
+                break;
+            //:: #endfor
+            default:
+                throw new IllegalArgumentException("Unknown wire version: " + wireVersion);
+            }
+            return factory.getReader().readFrom(bb);
+        }
+    }
+
+    public static OFMessageReader<OFMessage> getGenericReader() {
+        return GENERIC_READER;
+    }
+}
diff --git a/java_gen/templates/of_factory_class.java b/java_gen/templates/of_factory_class.java
new file mode 100644
index 0000000..ef26ca0
--- /dev/null
+++ b/java_gen/templates/of_factory_class.java
@@ -0,0 +1,175 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: import itertools
+//:: import re
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package ${factory.package};
+
+import org.projectfloodlight.openflow.protocol.OFOxmList;
+
+//:: include("_imports.java")
+
+public class ${factory.name} implements ${factory.interface.name} {
+    public final static ${factory.name} INSTANCE = new ${factory.name}();
+
+    //:: if factory.interface.xid_generator:
+    private final XidGenerator xidGenerator = XidGenerators.global();
+    //:: #endif
+
+    //:: for name, clazz in factory.interface.sub_factories.items():
+    public ${clazz} ${name}() {
+        return ${clazz}Ver${factory.version.dotless_version}.INSTANCE;
+    }
+    //:: #endfor
+
+//:: general_get_match_func_written = False
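+//:: # emit the generic buildMatch()/matchWildcardAll() helpers only once, for the
+//:: # first Match class supported by this factory version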
+//:: for i in factory.interface.members:
+    //:: if i.is_virtual:
+    //::    continue
+    //:: #endif
+    //:: is_match_object = re.match('OFMatch.*', i.name) # i.has_version(factory.version) and model.generate_class(i.versioned_class(factory.version)) and i.versioned_class(factory.version).interface.parent_interface == 'Match'
+    //:: unsupported_match_object = is_match_object and not i.has_version(factory.version)
+
+    //:: if len(i.writeable_members) > 0:
+    public ${i.name}.Builder ${factory.interface.method_name(i, builder=True)}() {
+        //::   if i.has_version(factory.version) and model.generate_class(i.versioned_class(factory.version)):
+        return new ${i.versioned_class(factory.version).name}.Builder()${".setXid(nextXid())" if i.member_by_name("xid") else ""};
+        //:: else:
+        throw new UnsupportedOperationException("${i.name} not supported in version ${factory.version}");
+        //:: #endif
+    }
+    //:: #endif
+    //:: if not general_get_match_func_written and is_match_object and not unsupported_match_object:
+    public Match.Builder buildMatch() {
+        return new ${i.versioned_class(factory.version).name}.Builder();
+    }
+
+    final static Match MATCH_WILDCARD_ALL = ${i.versioned_class(factory.version).name}.DEFAULT;
+
+    public Match matchWildcardAll() {
+        return MATCH_WILDCARD_ALL;
+    }
+    //::     general_get_match_func_written = True
+    //:: #endif
+    //:: if len(i.writeable_members) <= 2:
+    public ${i.name} ${factory.interface.method_name(i, builder=False)}(${", ".join("%s %s" % (p.java_type.public_type, p.name) for p in i.writeable_members if p.name != "xid" )}) {
+        //::   if i.has_version(factory.version) and model.generate_class(i.versioned_class(factory.version)):
+        //:: if len(i.writeable_members) > 0:
+        return new ${i.versioned_class(factory.version).name}(
+                ${",\n                      ".join(
+                         [ prop.name if prop.name != "xid" else "nextXid()" for prop in i.versioned_class(factory.version).data_members])}
+                    );
+        //:: else:
+        return ${i.versioned_class(factory.version).name}.INSTANCE;
+        //:: #endif
+        //:: else:
+        throw new UnsupportedOperationException("${i.name} not supported in version ${factory.version}");
+        //:: #endif
+    }
+    //:: #endif
+//:: #endfor
+
+    public OFMessageReader<${factory.base_class}> getReader() {
+//:: if factory.versioned_base_class:
+        return ${factory.versioned_base_class.name}.READER;
+//:: else:
+        throw new UnsupportedOperationException("Reader<${factory.base_class}> not supported in version ${factory.version}");
+//:: #endif
+    }
+
+//:: if factory.interface.name == 'OFOxms':
+    @SuppressWarnings("unchecked")
+    public <F extends OFValueType<F>> OFOxm<F> fromValue(F value, MatchField<F> field) {
+        switch (field.id) {
+            //:: for oxm_name in model.oxm_map:
+            //::    type_name, value, masked = model.oxm_map[oxm_name]
+            //::    if masked:
+            //::        continue
+            //::    #endif
+            //::    method_name = oxm_name.replace('OFOxm', '')
+            //::    method_name = method_name[0].lower() + method_name[1:]
+            case ${value}:
+                //:: # The cast to Object works around a javac bug: some versions cannot handle a cast from a generic type to anything other than Object
+                return (OFOxm<F>)((Object)${method_name}((${type_name})((Object)value)));
+            //:: #endfor
+            default:
+                throw new IllegalArgumentException("No OXM known for match field " + field);
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    public <F extends OFValueType<F>> OFOxm<F> fromValueAndMask(F value, F mask, MatchField<F> field) {
+        switch (field.id) {
+            //:: for oxm_name in model.oxm_map:
+            //::    type_name, value, masked = model.oxm_map[oxm_name]
+            //::    if not masked:
+            //::        continue
+            //::    #endif
+            //::    method_name = oxm_name.replace('OFOxm', '')
+            //::    method_name = method_name[0].lower() + method_name[1:]
+            case ${value}:
+                //:: # The cast to Object works around a javac bug: some versions cannot handle a cast from a generic type to anything other than Object
+                return (OFOxm<F>)((Object)${method_name}((${type_name})((Object)value), (${type_name})((Object)mask)));
+            //:: #endfor
+            default:
+                throw new IllegalArgumentException("No OXM known for match field " + field);
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    public <F extends OFValueType<F>> OFOxm<F> fromMasked(Masked<F> masked, MatchField<F> field) {
+        switch (field.id) {
+            //:: for oxm_name in model.oxm_map:
+            //::    type_name, value, masked = model.oxm_map[oxm_name]
+            //::    if not masked:
+            //::        continue
+            //::    #endif
+            //::    method_name = oxm_name.replace('OFOxm', '')
+            //::    method_name = method_name[0].lower() + method_name[1:]
+            case ${value}:
+                //:: # The cast to Object works around a javac bug: some versions cannot handle a cast from a generic type to anything other than Object
+                return (OFOxm<F>)((Object)${method_name}((${type_name})((Object)(masked.getValue())), (${type_name})((Object)(masked.getMask()))));
+            //:: #endfor
+            default:
+                return null;
+        }
+    }
+//:: #endif
+//:: if factory.interface.xid_generator:
+    public long nextXid() {
+        return xidGenerator.nextXid();
+    }
+//:: #endif
+
+    public OFVersion getVersion() {
+        return OFVersion.${factory.version.constant_version};
+    }
+}
diff --git a/java_gen/templates/of_factory_interface.java b/java_gen/templates/of_factory_interface.java
new file mode 100644
index 0000000..3694530
--- /dev/null
+++ b/java_gen/templates/of_factory_interface.java
@@ -0,0 +1,68 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: import itertools
+//:: import re
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package ${factory.package};
+
+//:: include("_imports.java")
+
+public interface ${factory.name}${" extends XidGenerator" if factory.xid_generator else ""} {
+    // Subfactories
+//:: for name, clazz in factory.sub_factories.items():
+    ${clazz} ${name}();
+//:: #endfor
+
+//:: for i in factory.members:
+    //:: if i.is_virtual:
+    //::    continue
+    //:: #endif
+    //:: if len(i.writeable_members) > 0:
+    ${i.name}.Builder ${factory.method_name(i, builder=True)}()${ "" if i.is_universal else " throws UnsupportedOperationException"};
+    //:: #endif
+    //:: if len(i.writeable_members) <= 2:
+    ${i.name} ${factory.method_name(i, builder=False )}(${", ".join("%s %s" % (p.java_type.public_type, p.name) for p in i.writeable_members if p.name != "xid" )});
+    //:: #endif
+//:: #endfor
+//:: if factory.name == 'OFFactory':
+    Match.Builder buildMatch();
+    Match matchWildcardAll();
+//:: #endif
+
+    OFMessageReader<${factory.base_class}> getReader();
+    OFVersion getVersion();
+//:: if factory.name == 'OFOxms':
+
+    public <F extends OFValueType<F>> OFOxm<F> fromValue(F value, MatchField<F> field);
+    public <F extends OFValueType<F>> OFOxm<F> fromValueAndMask(F value, F mask, MatchField<F> field);
+    public <F extends OFValueType<F>> OFOxm<F> fromMasked(Masked<F> masked, MatchField<F> field);
+//:: #endif
+}
diff --git a/java_gen/templates/of_interface.java b/java_gen/templates/of_interface.java
new file mode 100644
index 0000000..a515ad1
--- /dev/null
+++ b/java_gen/templates/of_interface.java
@@ -0,0 +1,56 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: import itertools
+//:: import re
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package ${msg.package};
+
+//:: include("_imports.java", msg=msg)
+
+public interface ${msg.name}${ "<%s>" % msg.type_annotation if msg.type_annotation else ""} extends ${", ".join(msg.all_parent_interfaces)} {
+//:: for prop in msg.members:
+    ${prop.java_type.public_type} ${prop.getter_name}()${ "" if prop.is_universal else " throws UnsupportedOperationException"};
+//:: #endfor
+
+    void writeTo(ChannelBuffer channelBuffer);
+
+    Builder${msg.type_variable} createBuilder();
+    //:: simple_type, annotation = re.match(r'(\w+)(<.*>)?', msg.parent_interface).groups() if msg.parent_interface else ("", "")
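+    //:: # split the parent interface into its raw name and an optional generic annotation (e.g. "<T>")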
+    public interface Builder${ "<%s>" % msg.type_annotation if msg.type_annotation else ""} ${"extends %s.Builder" % simple_type if msg.parent_interface else ""}${annotation if annotation else ""} {
+        ${msg.name}${msg.type_variable} build();
+//:: for prop in msg.members:
+        ${prop.java_type.public_type} ${prop.getter_name}()${ "" if prop.is_universal else " throws UnsupportedOperationException"};
+//:: if prop.is_writeable:
+        Builder${msg.type_variable} ${prop.setter_name}(${prop.java_type.public_type} ${prop.name})${ "" if prop.is_universal else " throws UnsupportedOperationException"};
+//:: #endif
+//:: #endfor
+    }
+}
diff --git a/java_gen/templates/of_virtual_class.java b/java_gen/templates/of_virtual_class.java
new file mode 100644
index 0000000..2c31c75
--- /dev/null
+++ b/java_gen/templates/of_virtual_class.java
@@ -0,0 +1,107 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: from loxi_ir import *
+//:: import os
+//:: import itertools
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package ${msg.package};
+
+//:: include("_imports.java", msg=msg)
+
+abstract class ${msg.name} {
+    // version: ${version}
+    final static byte WIRE_VERSION = ${version.int_version};
+//:: if msg.is_fixed_length:
+    final static int LENGTH = ${msg.length};
+//:: else:
+    final static int MINIMUM_LENGTH = ${msg.min_length};
+//:: #endif
+
+
+    public final static ${msg.name}.Reader READER = new Reader();
+
+    static class Reader implements OFMessageReader<${msg.interface.inherited_declaration()}> {
+        @Override
+        public ${msg.interface.inherited_declaration()} readFrom(ChannelBuffer bb) throws OFParseError {
+//:: if msg.is_fixed_length:
+            if(bb.readableBytes() < LENGTH)
+//:: else:
+            if(bb.readableBytes() < MINIMUM_LENGTH)
+//:: #endif
+                return null;
+            int start = bb.readerIndex();
+//:: fields_with_length_member = {}
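+//:: # maps a field name to the local variable holding its previously read length, passed to skip_op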
+//::    for prop in msg.members:
+//::       if prop.is_data:
+            ${prop.java_type.skip_op(version,
+                    length=fields_with_length_member[prop.c_name] if prop.c_name in fields_with_length_member else None)};
+//:: elif prop.is_pad:
+            // pad: ${prop.length} bytes
+            bb.skipBytes(${prop.length});
+//:: elif prop.is_fixed_value:
+            // fixed value property ${prop.name} == ${prop.value}
+            ${prop.java_type.priv_type} ${prop.name} = ${prop.java_type.read_op(version, pub_type=False)};
+            if(${prop.name} != ${prop.priv_value})
+                throw new OFParseError("Wrong ${prop.name}: Expected=${prop.enum_value}(${prop.value}), got="+${prop.name});
+//:: elif prop.is_length_value:
+            ${prop.java_type.public_type} ${prop.name} = ${prop.java_type.read_op(version, pub_type=True)};
+            if(${prop.name} < MINIMUM_LENGTH)
+                throw new OFParseError("Wrong ${prop.name}: Expected to be >= " + MINIMUM_LENGTH + ", was: " + ${prop.name});
+//:: elif prop.is_field_length_value:
+//::        fields_with_length_member[prop.member.field_name] = prop.name
+            int ${prop.name} = ${prop.java_type.read_op(version)};
+//:: elif prop.is_discriminator:
+            ${prop.java_type.priv_type} ${prop.name} = ${prop.java_type.read_op(version, pub_type=False)};
+            bb.readerIndex(start);
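+            // rewind to the start of the message so the concrete subclass reader
+            // parses it from the beginning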
+            switch(${prop.name}) {
+//::     for sub in msg.subclasses:
+//::           if not model.generate_class(sub):
+               // skip ${sub.name} - excluded from generation
+//::           else:
+//::           m = sub.member_by_name(prop.name)
+//::           if not m.is_fixed_value:
+//::                  raise Exception("subtype %s of %s does not have fixed value for discriminator %s" %
+//::                           (sub.name, msg.name, prop.name))
+//::           #endif
+               case ${m.priv_value}:
+                   // discriminator value ${m.enum_value}=${m.value} for class ${sub.name}
+                   return ${sub.name}.READER.readFrom(bb);
+//:: #endif    # generate_class
+//:: #endfor
+               default:
+                   throw new OFParseError("Unknown value for discriminator ${prop.name} of class ${msg.name}: " + ${prop.name});
+            }
+//::        break
+//:: #endif
+//:: #endfor
+        }
+    }
+}
diff --git a/java_gen/templates/unit_test.java b/java_gen/templates/unit_test.java
new file mode 100644
index 0000000..cf01429
--- /dev/null
+++ b/java_gen/templates/unit_test.java
@@ -0,0 +1,119 @@
+//:: # Copyright 2013, Big Switch Networks, Inc.
+//:: #
+//:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+//:: # the following special exception:
+//:: #
+//:: # LOXI Exception
+//:: #
+//:: # As a special exception to the terms of the EPL, you may distribute libraries
+//:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+//:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+//:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+//:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+//:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+//:: #
+//:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//:: #
+//:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+//:: # a copy of the EPL at:
+//:: #
+//:: # http://www.eclipse.org/legal/epl-v10.html
+//:: #
+//:: # Unless required by applicable law or agreed to in writing, software
+//:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+//:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+//:: # EPL for the specific language governing permissions and limitations
+//:: # under the EPL.
+//::
+//:: from loxi_ir import *
+//:: import itertools
+//:: import java_gen.java_model as java_model
+//:: include('_copyright.java')
+
+//:: include('_autogen.java')
+
+package ${test.package};
+
+//:: include("_imports.java", msg=msg)
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.hamcrest.CoreMatchers;
+
+
+public class ${test.name} {
+    //:: factory = java_model.model.factory_of(test.interface)
+    //:: var_type = msg.interface.name
+    //:: var_name = msg.interface.variable_name
+    //:: use_builder = len(msg.data_members) > 0
+    //:: factory_method = factory.method_name(msg.interface, builder=use_builder)
+    //:: factory_impl = java_model.model.factory_of(test.interface).of_version(test.java_class.version).name
+    ${factory.name if factory.name is not None else "OFFactory"} factory;
+
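+    //:: # values >= 0x80 need an explicit (byte) cast: Java bytes are signed,
+    //:: # so the int literals 0x80..0xff would not compile without it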
+    final static byte[] ${msg.constant_name}_SERIALIZED =
+        new byte[] { ${", ".join("%s0x%x" % (("" if ord(c)<128 else "(byte) "),  ord(c)) for c in test_data["binary"] ) } };
+
+    @Before
+    public void setup() {
+        factory = ${factory_impl + ".INSTANCE" if factory_impl is not None else "OFFactories.getFactory(OFVersion." + version.constant_version + ")"};
+    }
+
+    //:: if "java" in test_data:
+    @Test
+    public void testWrite() {
+        //:: if use_builder:
+        ${var_type}.Builder builder = factory.${factory_method}();
+        ${test_data["java"]};
+        ${var_type} ${var_name} = builder.build();
+        //:: else:
+        ${var_type} ${var_name} = factory.${factory_method}();
+        //:: #endif
+        ChannelBuffer bb = ChannelBuffers.dynamicBuffer();
+        ${var_name}.writeTo(bb);
+        byte[] written = new byte[bb.readableBytes()];
+        bb.readBytes(written);
+
+        assertThat(written, CoreMatchers.equalTo(${msg.constant_name}_SERIALIZED));
+    }
+
+    @Test
+    public void testRead() throws Exception {
+        //:: if use_builder:
+        ${var_type}.Builder builder = factory.${factory_method}();
+        ${test_data["java"]};
+        ${var_type} ${var_name}Built = builder.build();
+        //:: else:
+        ${var_type} ${var_name}Built = factory.${factory_method}();
+        //:: #endif
+
+        ChannelBuffer input = ChannelBuffers.copiedBuffer(${msg.constant_name}_SERIALIZED);
+
+        // FIXME should invoke the overall reader once implemented
+        ${var_type} ${var_name}Read = ${msg.name}.READER.readFrom(input);
+        assertEquals(${msg.constant_name}_SERIALIZED.length, input.readerIndex());
+
+        assertEquals(${var_name}Built, ${var_name}Read);
+   }
+   //:: else:
+   // FIXME: No java stanza in test_data for this class. Add for more comprehensive unit testing
+   //:: #endif
+
+   @Test
+   public void testReadWrite() throws Exception {
+       ChannelBuffer input = ChannelBuffers.copiedBuffer(${msg.constant_name}_SERIALIZED);
+
+       // FIXME should invoke the overall reader once implemented
+       ${var_type} ${var_name} = ${msg.name}.READER.readFrom(input);
+       assertEquals(${msg.constant_name}_SERIALIZED.length, input.readerIndex());
+
+       // write message again
+       ChannelBuffer bb = ChannelBuffers.dynamicBuffer();
+       ${var_name}.writeTo(bb);
+       byte[] written = new byte[bb.readableBytes()];
+       bb.readBytes(written);
+
+       assertThat(written, CoreMatchers.equalTo(${msg.constant_name}_SERIALIZED));
+   }
+
+}
diff --git a/lang_c.py b/lang_c.py
index 4addf0a..0a881e9 100644
--- a/lang_c.py
+++ b/lang_c.py
@@ -33,12 +33,17 @@
 """
 
 import os
+import c_gen.of_g_legacy as of_g
+import c_gen.build_of_g as build_of_g
 import c_gen.c_code_gen as c_code_gen
 import c_gen.c_test_gen as c_test_gen
 import c_gen.c_dump_gen as c_dump_gen
 import c_gen.c_show_gen as c_show_gen
 import c_gen.c_validator_gen as c_validator_gen
 import c_gen.util
+import c_gen.codegen
+import loxi_utils.loxi_utils as loxi_utils
+import template_utils
 
 def static(out, name):
     c_gen.util.render_template(out, os.path.basename(name))
@@ -67,8 +72,6 @@
     'loci/inc/loci/of_wire_buf.h': static,
 
     # LOCI code
-    'loci/src/loci.c': c_code_gen.top_c_gen,
-    'loci/src/of_type_data.c': c_code_gen.type_data_c_gen,
     'loci/src/of_match.c': c_code_gen.match_c_gen,
     'loci/src/loci_obj_dump.c': c_dump_gen.gen_obj_dump_c,
     'loci/src/loci_obj_show.c': c_show_gen.gen_obj_show_c,
@@ -79,9 +82,9 @@
     'loci/src/loci_log.c': static,
     'loci/src/loci_log.h': static,
     'loci/src/of_object.c': static,
-    'loci/src/of_type_maps.c': static,
     'loci/src/of_utils.c': static,
     'loci/src/of_wire_buf.c': static,
+    'loci/src/loci_setup_from_add_fns.c': static,
 
     # Static LOCI documentation
     'loci/README': static,
@@ -97,6 +100,7 @@
     'locitest/src/test_msg.c': c_test_gen.gen_msg_test,
     'locitest/src/test_scalar_acc.c': c_test_gen.gen_message_scalar_test,
     'locitest/src/test_uni_acc.c': c_test_gen.gen_unified_accessor_tests,
+    'locitest/src/test_data.c': c_test_gen.gen_datafiles_tests,
 
     # Static locitest code
     'locitest/inc/locitest/unittest.h': static,
@@ -106,4 +110,24 @@
     'locitest/src/test_setup_from_add.c': static,
     'locitest/src/test_utils.c': static,
     'locitest/src/test_validator.c': static,
+    'locitest/src/main.c': static,
+    'locitest/Makefile': static,
 }
+
+def generate(install_dir):
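+    # build the legacy of_g model from the parsed inputs, render the static/legacy
+    # targets above, then emit the newer template-driven classes, lists and type maps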
+    build_of_g.initialize_versions()
+    build_of_g.build_ordered_classes()
+    build_of_g.populate_type_maps()
+    build_of_g.analyze_input()
+    build_of_g.unify_input()
+    build_of_g.order_and_assign_object_ids()
+    for (name, fn) in targets.items():
+        with template_utils.open_output(install_dir, name) as outfile:
+            fn(outfile, os.path.basename(name))
+    c_gen.codegen.generate_classes(install_dir)
+    c_gen.codegen.generate_header_classes(install_dir)
+    c_gen.codegen.generate_classes_header(install_dir)
+    c_gen.codegen.generate_lists(install_dir)
+    c_gen.codegen.generate_strings(install_dir)
+    c_gen.codegen.generate_init_map(install_dir)
+    c_gen.codegen.generate_type_maps(install_dir)
diff --git a/loxi_front_end/c_parse_utils.py b/lang_java.py
similarity index 66%
rename from loxi_front_end/c_parse_utils.py
rename to lang_java.py
index 5e8d471..f103330 100644
--- a/loxi_front_end/c_parse_utils.py
+++ b/lang_java.py
@@ -25,27 +25,11 @@
 # EPL for the specific language governing permissions and limitations
 # under the EPL.
 
-##
-# @brief Utilities related to parsing C files
-#
-import of_g
+"""
+@brief Java language specific LOXI generating configuration
+"""
 
-def type_dec_to_count_base(m_type):
-    """
-    Resolve a type declaration like uint8_t[4] to a count (4) and base_type
-    (uint8_t)
+import java_gen.codegen as java_codegen
 
-    @param m_type The string type declaration to process
-    """
-    count = 1
-    chk_ar = m_type.split('[')
-    if len(chk_ar) > 1:
-        count_str = chk_ar[1].split(']')[0]
-        if count_str in of_g.ofp_constants:
-            count = of_g.ofp_constants[count_str]
-        else:
-            count = int(count_str)
-        base_type = chk_ar[0]
-    else:
-        base_type = m_type
-    return count, base_type
+def generate(install_dir):
+    java_codegen.gen_all_java(install_dir)
diff --git a/lang_python.py b/lang_python.py
index 4d64e8d..019b62d 100644
--- a/lang_python.py
+++ b/lang_python.py
@@ -45,8 +45,14 @@
                 const.py        # OpenFlow constants
                 message.py      # Message classes
                 util.py         # Utility functions
-            of12: ...
-            of13: ...
+            of11: ...           # (code generation incomplete)
+                instruction.py  # Instruction classes
+            of12: ...           # (code generation incomplete)
+                oxm.py          # OXM classes
+            of13: ...           # (code generation incomplete)
+                action_id.py    # Action ID classes
+                instruction_id.py # Instruction ID classes
+                meter_band.py   # Meter band classes
 
 The user will add the pyloxi directory to PYTHONPATH. Then they can
 "import loxi" or "import loxi.of10". The idiomatic import is
@@ -57,20 +63,30 @@
 "ofp.OFPP_NONE".
 """
 
+import os
+from loxi_globals import OFVersions
+import loxi_globals
+import loxi_utils.loxi_utils as loxi_utils
 import py_gen
 import py_gen.util
 import py_gen.codegen
+import template_utils
 
 versions = {
     1: "of10",
     2: "of11",
     3: "of12",
-    4: "of13"
+    4: "of13",
 }
 
 prefix = 'pyloxi/loxi'
 
-modules = ["action", "common", "const", "message", "util"]
+modules = {
+    1: ["action", "common", "const", "message", "util"],
+    2: ["action", "common", "const", "instruction", "message", "util"],
+    3: ["action", "common", "const", "instruction", "message", "oxm", "util"],
+    4: ["action", "action_id", "common", "const", "instruction", "instruction_id", "message", "meter_band", "oxm", "bsn_tlv", "util"],
+}
 
 def make_gen(name, version):
     fn = getattr(py_gen.codegen, "generate_" + name)
@@ -82,10 +98,17 @@
 targets = {
     prefix+'/__init__.py': static('toplevel_init.py'),
     prefix+'/pp.py': static('pp.py'),
+    prefix+'/generic_util.py': static('generic_util.py'),
 }
 
 for version, subdir in versions.items():
     targets['%s/%s/__init__.py' % (prefix, subdir)] = make_gen('init', version)
-    for module in modules:
+    for module in modules[version]:
         filename = '%s/%s/%s.py' % (prefix, subdir, module)
-        targets[filename] = make_gen(module, version)
+        targets[filename] = make_gen(module, OFVersions.from_wire(version))
+
+def generate(install_dir):
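+    # initialize the python backend model, then render every target file into install_dir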
+    py_gen.codegen.init()
+    for (name, fn) in targets.items():
+        with template_utils.open_output(install_dir, name) as outfile:
+            fn(outfile, os.path.basename(name))
diff --git a/loxi_front_end/c_parse_utils.py b/lang_wireshark.py
similarity index 66%
copy from loxi_front_end/c_parse_utils.py
copy to lang_wireshark.py
index 5e8d471..525b241 100644
--- a/loxi_front_end/c_parse_utils.py
+++ b/lang_wireshark.py
@@ -25,27 +25,18 @@
 # EPL for the specific language governing permissions and limitations
 # under the EPL.
 
-##
-# @brief Utilities related to parsing C files
-#
-import of_g
+"""
+Wireshark dissector backend for LOXI
 
-def type_dec_to_count_base(m_type):
-    """
-    Resolve a type declaration like uint8_t[4] to a count (4) and base_type
-    (uint8_t)
+Target directory structure:
+    wireshark:
+        openflow.lua
 
-    @param m_type The string type declaration to process
-    """
-    count = 1
-    chk_ar = m_type.split('[')
-    if len(chk_ar) > 1:
-        count_str = chk_ar[1].split(']')[0]
-        if count_str in of_g.ofp_constants:
-            count = of_g.ofp_constants[count_str]
-        else:
-            count = int(count_str)
-        base_type = chk_ar[0]
-    else:
-        base_type = m_type
-    return count, base_type
+The user will copy openflow.lua into ~/.wireshark/plugins, where it will be
+loaded automatically by Wireshark.
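+
+For example (assuming the default loxi_output install directory; adjust paths as needed):
+
+    cp loxi_output/wireshark/openflow.lua ~/.wireshark/plugins/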
+"""
+
+import wireshark_gen
+
+def generate(install_dir):
+    wireshark_gen.generate(install_dir)
diff --git a/loxi_front_end/frontend.py b/loxi_front_end/frontend.py
new file mode 100644
index 0000000..bc79ed9
--- /dev/null
+++ b/loxi_front_end/frontend.py
@@ -0,0 +1,120 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+from generic_utils import find
+from collections import namedtuple
+import copy
+import loxi_globals
+import loxi_front_end.frontend_ir as ir
+
+class InputError(Exception):
+    pass
+
+
+FrontendCtx = namedtuple("FrontendCtx", ("used_enums"))
+
+def get_type(t_ast, ctx):
+    if t_ast[0] == "enum":
+        ctx.used_enums.add(t_ast[1])
+
+    return t_ast[1]
+
+def create_member(m_ast, ctx):
+    if m_ast[0] == 'pad':
+        return ir.OFPadMember(length=m_ast[1])
+    elif m_ast[0] == 'type':
+        return ir.OFTypeMember(name=m_ast[2], oftype=get_type(m_ast[1], ctx), value=m_ast[3])
+    elif m_ast[0] == 'data':
+        if m_ast[2] == 'length' or m_ast[2] == 'len': # Should be moved to parser
+            return ir.OFLengthMember(name=m_ast[2], oftype=get_type(m_ast[1], ctx))
+        elif m_ast[2] == 'actions_len':
+            return ir.OFFieldLengthMember(name=m_ast[2], oftype=get_type(m_ast[1], ctx), field_name='actions')
+        if m_ast[2] == 'version': # Should be moved to parser
+            return ir.OFVersionMember(name=m_ast[2], oftype=get_type(m_ast[1], ctx))
+        elif m_ast[2] == 'key_length':
+            return ir.OFFieldLengthMember(name=m_ast[2], oftype=get_type(m_ast[1], ctx), field_name='key')
+        else:
+            return ir.OFDataMember(name=m_ast[2], oftype=get_type(m_ast[1], ctx))
+    elif m_ast[0] == 'discriminator':
+        return ir.OFDiscriminatorMember(name=m_ast[2], oftype=get_type(m_ast[1], ctx))
+    else:
+        raise InputError("Dont know how to create member: %s" % m_ast[0])
+
+def create_ofinput(filename, ast):
+    """
+    Create an OFInput from an AST
+
+    @param filename Name of the input file
+    @param ast An AST as returned by loxi_front_end.parser.parse
+
+    @returns An OFInput object
+    """
+    ctx = FrontendCtx(set())
+    ofinput = ir.OFInput(filename, wire_versions=set(), classes=[], enums=[])
+
+    for decl_ast in ast:
+        if decl_ast[0] == 'struct':
+            # 0: "struct"
+            # 1: name
+            # 2: potentially list of [param_name, param_value]
+            # 3: super_class or None
+            # 4: list of members
+            superclass = decl_ast[3]
+            members = [create_member(m_ast, ctx) for m_ast in decl_ast[4]]
+
+            discriminators = [ m for m in members if isinstance(m, ir.OFDiscriminatorMember) ]
+            if len(discriminators) > 1:
+                raise InputError("%s: Cannot support more than one discriminator by class - got %s" %
+                        (decl_ast[1], repr(discriminators)))
+            ofclass = ir.OFClass(name=decl_ast[1], members=members, superclass=superclass,
+                    virtual = len(discriminators) > 0,
+                    params = { param: value for param, value in decl_ast[2] })
+            ofinput.classes.append(ofclass)
+        if decl_ast[0] == 'enum':
+            # 0: "enum"
+            # 1: name
+            # 2: potentially list of [param_name, param_value]
+            # 3: list of [constant_name, constant_value]+
+            enum = ir.OFEnum(name=decl_ast[1],
+                    entries=[ir.OFEnumEntry(name=x[0], value=x[2], params={param:value for param, value in x[1] }) for x in decl_ast[3]],
+                    params = { param: value for param, value in decl_ast[2] }
+                    )
+            ofinput.enums.append(enum)
+        elif decl_ast[0] == 'metadata':
+            if decl_ast[1] == 'version':
+                if decl_ast[2] == 'any':
+                    ofinput.wire_versions.update(v.wire_version for v in loxi_globals.OFVersions.all_supported)
+                elif int(decl_ast[2]) in loxi_globals.OFVersions.wire_version_map:
+                    ofinput.wire_versions.add(int(decl_ast[2]))
+                else:
+                    raise InputError("Unrecognized wire protocol version %r" % decl_ast[2])
+                found_wire_version = True
+
+    if not ofinput.wire_versions:
+        raise InputError("Missing #version metadata")
+
+    return ofinput
diff --git a/loxi_front_end/frontend_ir.py b/loxi_front_end/frontend_ir.py
new file mode 100644
index 0000000..af3f223
--- /dev/null
+++ b/loxi_front_end/frontend_ir.py
@@ -0,0 +1,152 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+from generic_utils import find
+from collections import namedtuple
+
+# This module represents the frontend IR.
+__all__ = [
+    'OFInput',
+    'OFClass',
+    'OFDataMember',
+    'OFTypeMember',
+    'OFDiscriminatorMember',
+    'OFLengthMember',
+    'OFFieldLengthMember',
+    'OFPadMember',
+    'OFVersionMember',
+    'OFEnum',
+    'OFEnumEntry'
+]
+
+"""
+One input file
+
+@param filename Name of the input file
+@param wire_versions Set of integer wire versions this file applies to
+@param classes List of OFClass objects in the same order as in the file
+@param enums List of Enum objects in the same order as in the file
+"""
+OFInput = namedtuple('OFInput', ['filename', 'wire_versions', 'classes', 'enums'])
+
+"""
+An OpenFlow class
+
+All compound objects like messages, actions, instructions, etc are
+uniformly represented by this class.
+
+The members are in the same order as on the wire.
+
+@param name
+@param superclass name of the super class
+@param members List of *Member objects
+@param params optional dictionary of parameters
+"""
+OFClass = namedtuple('OFClass', ['name', 'superclass', 'members', 'virtual', 'params'])
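+# For illustration only (hypothetical values), a minimal class might be represented as:
+#   OFClass(name='of_echo_request', superclass='of_header', virtual=False, params={},
+#           members=[OFTypeMember(name='type', oftype='uint8_t', value=2),
+#                    OFDataMember(name='data', oftype='of_octets_t')])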
+
+"""
+Normal field
+
+@param name
+@param oftype C-like type string
+
+Example: packet_in.buffer_id
+"""
+OFDataMember = namedtuple('OFDataMember', ['name', 'oftype'])
+
+"""
+Field that declares that this is an abstract super-class and
+that the sub classes will be discriminated based on this field.
+E.g., 'type' is the discriminator member of the abstract superclass
+of_action.
+
+@param name
+"""
+OFDiscriminatorMember = namedtuple('OFDiscriminatorMember', ['name', 'oftype'])
+
+"""
+Field used to determine the type of an OpenFlow object
+
+@param name
+@param oftype C-like type string
+@param value Fixed type value
+
+Example: packet_in.type, flow_add._command
+"""
+OFTypeMember = namedtuple('OFTypeMember', ['name', 'oftype', 'value'])
+
+"""
+Field with the length of the containing object
+
+@param name
+@param oftype C-like type string
+
+Example: packet_in.length, action_output.len
+"""
+OFLengthMember = namedtuple('OFLengthMember', ['name', 'oftype'])
+
+"""
+Field with the length of another field in the containing object
+
+@param name
+@param oftype C-like type string
+@param field_name Peer field whose length this field contains
+
+Example: packet_out.actions_len (only usage)
+"""
+OFFieldLengthMember = namedtuple('OFFieldLengthMember', ['name', 'oftype', 'field_name'])
+
+"""
+Zero-filled padding
+
+@param length Length in bytes
+
+Example: packet_in.pad
+"""
+OFPadMember = namedtuple('OFPadMember', ['length'])
+
+"""
+Field with the version of an OpenFlow object
+
+@param name
+@param oftype C-like type string
+
+Example: hello.version
+"""
+OFVersionMember = namedtuple('OFVersionMember', ['name', 'oftype'])
+
+"""
+An OpenFlow enumeration
+
+All values are Python ints.
+
+@param name
+@param entries List of OFEnumEntry objects in input order
+@param params Dict of optional params. Currently defined:
+       - wire_type: the low_level type of the enum values (uint8,...)
+"""
+OFEnum = namedtuple('OFEnum', ['name', 'entries', 'params'])
+OFEnumEntry = namedtuple('OFEnumEntry', ['name', 'value', 'params'])
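+
+# A minimal, hypothetical sketch of how these namedtuples compose (the field
+# values below are illustrative only and are not taken from a real input file):
+if __name__ == '__main__':
+    buffer_id = OFDataMember(name='buffer_id', oftype='uint32_t')
+    packet_in = OFClass(name='of_packet_in', superclass='of_header',
+                        members=[buffer_id], virtual=False, params={})
+    example = OFInput(filename='example-1.3.input', wire_versions=set([4]),
+                      classes=[packet_in], enums=[])
+    print(example)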
diff --git a/loxi_front_end/oxm.py b/loxi_front_end/oxm.py
deleted file mode 100644
index d4cf273..0000000
--- a/loxi_front_end/oxm.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# Copyright 2013, Big Switch Networks, Inc.
-#
-# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
-# the following special exception:
-#
-# LOXI Exception
-#
-# As a special exception to the terms of the EPL, you may distribute libraries
-# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
-# that copyright and licensing notices generated by LoxiGen are not altered or removed
-# from the LoxiGen Libraries and the notice provided below is (i) included in
-# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
-# documentation for the LoxiGen Libraries, if distributed in binary form.
-#
-# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
-#
-# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
-# a copy of the EPL at:
-#
-# http://www.eclipse.org/legal/epl-v10.html
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# EPL for the specific language governing permissions and limitations
-# under the EPL.
-
-import of_g
-
-oxm_types = dict(
-    in_port               = "of_port_no_t",
-    in_port_masked        = "of_port_no_t",
-    in_phy_port           = "of_port_no_t",
-    in_phy_port_masked    = "of_port_no_t",
-    metadata              = "uint64_t",
-    metadata_masked       = "uint64_t",
-    eth_dst               = "of_mac_addr_t",
-    eth_dst_masked        = "of_mac_addr_t",
-    eth_src               = "of_mac_addr_t",
-    eth_src_masked        = "of_mac_addr_t",
-    eth_type              = "uint16_t",
-    eth_type_masked       = "uint16_t",
-    vlan_vid              = "uint16_t",
-    vlan_vid_masked       = "uint16_t",
-    vlan_pcp              = "uint8_t",
-    vlan_pcp_masked       = "uint8_t",
-    ip_dscp               = "uint8_t",
-    ip_dscp_masked        = "uint8_t",
-    ip_ecn                = "uint8_t",
-    ip_ecn_masked         = "uint8_t",
-    ip_proto              = "uint8_t",
-    ip_proto_masked       = "uint8_t",
-    ipv4_src              = "uint32_t",
-    ipv4_src_masked       = "uint32_t",
-    ipv4_dst              = "uint32_t",
-    ipv4_dst_masked       = "uint32_t",
-    tcp_src               = "uint16_t",
-    tcp_src_masked        = "uint16_t",
-    tcp_dst               = "uint16_t",
-    tcp_dst_masked        = "uint16_t",
-    udp_src               = "uint16_t",
-    udp_src_masked        = "uint16_t",
-    udp_dst               = "uint16_t",
-    udp_dst_masked        = "uint16_t",
-    sctp_src              = "uint16_t",
-    sctp_src_masked       = "uint16_t",
-    sctp_dst              = "uint16_t",
-    sctp_dst_masked       = "uint16_t",
-    icmpv4_type           = "uint8_t",
-    icmpv4_type_masked    = "uint8_t",
-    icmpv4_code           = "uint8_t",
-    icmpv4_code_masked    = "uint8_t",
-    arp_op                = "uint16_t",
-    arp_op_masked         = "uint16_t",
-    arp_spa               = "uint32_t",
-    arp_spa_masked        = "uint32_t",
-    arp_tpa               = "uint32_t",
-    arp_tpa_masked        = "uint32_t",
-    arp_sha               = "of_mac_addr_t",
-    arp_sha_masked        = "of_mac_addr_t",
-    arp_tha               = "of_mac_addr_t",
-    arp_tha_masked        = "of_mac_addr_t",
-    ipv6_src              = "of_ipv6_t",
-    ipv6_src_masked       = "of_ipv6_t",
-    ipv6_dst              = "of_ipv6_t",
-    ipv6_dst_masked       = "of_ipv6_t",
-    ipv6_flabel           = "uint32_t",
-    ipv6_flabel_masked    = "uint32_t",
-    icmpv6_type           = "uint8_t",
-    icmpv6_type_masked    = "uint8_t",
-    icmpv6_code           = "uint8_t",
-    icmpv6_code_masked    = "uint8_t",
-    ipv6_nd_target        = "of_ipv6_t",
-    ipv6_nd_target_masked = "of_ipv6_t",
-    ipv6_nd_sll           = "of_mac_addr_t",
-    ipv6_nd_sll_masked    = "of_mac_addr_t",
-    ipv6_nd_tll           = "of_mac_addr_t",
-    ipv6_nd_tll_masked    = "of_mac_addr_t",
-    mpls_label            = "uint32_t",
-    mpls_label_masked     = "uint32_t",
-    mpls_tc               = "uint8_t",
-    mpls_tc_masked        = "uint8_t"
-    # FIXME Add 1.3 oxm elts
-    )
-
-oxm_wire_type = dict(
-    in_port               = (0 << 1),
-    in_port_masked        = (0 << 1) + 1,
-    in_phy_port           = (1 << 1),
-    in_phy_port_masked    = (1 << 1) + 1,
-    metadata              = (2 << 1),
-    metadata_masked       = (2 << 1) + 1,
-    eth_dst               = (3 << 1),
-    eth_dst_masked        = (3 << 1) + 1,
-    eth_src               = (4 << 1),
-    eth_src_masked        = (4 << 1) + 1,
-    eth_type              = (5 << 1),
-    eth_type_masked       = (5 << 1) + 1,
-    vlan_vid              = (6 << 1),
-    vlan_vid_masked       = (6 << 1) + 1,
-    vlan_pcp              = (7 << 1),
-    vlan_pcp_masked       = (7 << 1) + 1,
-    ip_dscp               = (8 << 1),
-    ip_dscp_masked        = (8 << 1) + 1,
-    ip_ecn                = (9 << 1),
-    ip_ecn_masked         = (9 << 1) + 1,
-    ip_proto              = (10 << 1),
-    ip_proto_masked       = (10 << 1) + 1,
-    ipv4_src              = (11 << 1),
-    ipv4_src_masked       = (11 << 1) + 1,
-    ipv4_dst              = (12 << 1),
-    ipv4_dst_masked       = (12 << 1) + 1,
-    tcp_src               = (13 << 1),
-    tcp_src_masked        = (13 << 1) + 1,
-    tcp_dst               = (14 << 1),
-    tcp_dst_masked        = (14 << 1) + 1,
-    udp_src               = (15 << 1),
-    udp_src_masked        = (15 << 1) + 1,
-    udp_dst               = (16 << 1),
-    udp_dst_masked        = (16 << 1) + 1,
-    sctp_src              = (17 << 1),
-    sctp_src_masked       = (17 << 1) + 1,
-    sctp_dst              = (18 << 1),
-    sctp_dst_masked       = (18 << 1) + 1,
-    icmpv4_type           = (19 << 1),
-    icmpv4_type_masked    = (19 << 1) + 1,
-    icmpv4_code           = (20 << 1),
-    icmpv4_code_masked    = (20 << 1) + 1,
-    arp_op                = (21 << 1),
-    arp_op_masked         = (21 << 1) + 1,
-    arp_spa               = (22 << 1),
-    arp_spa_masked        = (22 << 1) + 1,
-    arp_tpa               = (23 << 1),
-    arp_tpa_masked        = (23 << 1) + 1,
-    arp_sha               = (24 << 1),
-    arp_sha_masked        = (24 << 1) + 1,
-    arp_tha               = (25 << 1),
-    arp_tha_masked        = (25 << 1) + 1,
-    ipv6_src              = (26 << 1),
-    ipv6_src_masked       = (26 << 1) + 1,
-    ipv6_dst              = (27 << 1),
-    ipv6_dst_masked       = (27 << 1) + 1,
-    ipv6_flabel           = (28 << 1),
-    ipv6_flabel_masked    = (28 << 1) + 1,
-    icmpv6_type           = (29 << 1),
-    icmpv6_type_masked    = (29 << 1) + 1,
-    icmpv6_code           = (30 << 1),
-    icmpv6_code_masked    = (30 << 1) + 1,
-    ipv6_nd_target        = (31 << 1),
-    ipv6_nd_target_masked = (31 << 1) + 1,
-    ipv6_nd_sll           = (32 << 1),
-    ipv6_nd_sll_masked    = (32 << 1) + 1,
-    ipv6_nd_tll           = (33 << 1),
-    ipv6_nd_tll_masked    = (33 << 1) + 1,
-    mpls_label            = (34 << 1),
-    mpls_label_masked     = (34 << 1) + 1,
-    mpls_tc               = (35 << 1),
-    mpls_tc_masked        = (35 << 1) + 1
-    # FIXME Add 1.3 oxm elts
-)
-
-def add_oxm_classes_1_2(classes, version):
-    """
-    Add the OXM classes to object passed.  This is a dictionary
-    indexed by class name whose value is an array of member objects.
-    """
-    # First the parent class:
-    if version not in [of_g.VERSION_1_2, of_g.VERSION_1_3]:
-        return
-
-    members = []
-    classes["of_oxm"] = []
-    of_g.ordered_classes[version].append("of_oxm")
-    members.append(dict(name="type_len", m_type="uint32_t"))
-    classes["of_oxm_header"] = members
-    of_g.ordered_classes[version].append("of_oxm_header")
-
-    for oxm in oxm_types:
-        members = []
-        # Assert oxm_types[oxm] in of_base_types
-        m_type = oxm_types[oxm]
-        if m_type in of_g.of_mixed_types:
-            m_type = of_g.of_mixed_types[m_type][version]
-        # m_name = "value_" + of_g.of_base_types[m_type]["short_name"]
-        members.append(dict(name="type_len", m_type="uint32_t"))
-        # members.append(dict(name=m_name, m_type=oxm_types[oxm]))
-        members.append(dict(name="value", m_type=oxm_types[oxm]))
-        if oxm.find("_masked") > 0:
-            members.append(dict(name="value_mask", m_type=oxm_types[oxm]))
-            
-        name = "of_oxm_" + oxm
-        of_g.ordered_classes[version].append(name)
-        classes[name] = members
-        
-# /* Header for OXM experimenter match fields. */
-# struct ofp_oxm_experimenter_header {
-#     uint32_t oxm_header;        /* oxm_class = OFPXMC_EXPERIMENTER */
-#     uint32_t experimenter;      /* Experimenter ID which takes the same
-#                                    form as in struct ofp_experimenter_header. */
-# };
-
-
-# enum ofp_vlan_id {
-#     OFPVID_PRESENT = 0x1000, 
-#     OFPVID_NONE    = 0x0000, 
-# };
-
-# #define OFP_VLAN_NONE      OFPVID_NONE
diff --git a/loxi_front_end/parser.py b/loxi_front_end/parser.py
index d79dd3c..4a465f7 100644
--- a/loxi_front_end/parser.py
+++ b/loxi_front_end/parser.py
@@ -44,22 +44,44 @@
 identifier = word.copy().setName("identifier")
 
 # Type names
-scalar_type = word
-array_type = P.Combine(word + lit('[') - P.Word(P.alphanums + '_') - lit(']'))
-list_type = P.Combine(kw('list') - lit('(') - identifier - lit(')'))
-any_type = (array_type | list_type | scalar_type).setName("type name")
+enum_type = kw("enum") - word
+scalar_type = tag("scalar") + word
+array_type = tag("array") + P.Combine(word + lit('[') - P.Word(P.alphanums + '_') - lit(']'))
+list_type = tag("list") + P.Combine(kw('list') - lit('(') - identifier - lit(')'))
+any_type = P.Group(enum_type | array_type | list_type | scalar_type).setName("type name")
 
 # Structs
-struct_member = P.Group(any_type - identifier - s(';'))
-struct = kw('struct') - identifier - s('{') + \
-         P.Group(P.ZeroOrMore(struct_member)) + \
+pad_member = P.Group(kw('pad') - s('(') - integer - s(')'))
+discriminator_member = P.Group(tag('discriminator') + any_type + identifier + s('==') + s('?'))
+type_member = P.Group(tag('type') + any_type + identifier + s('==') + integer)
+data_member = P.Group(tag('data') + any_type - identifier)
+
+struct_param_name = kw("align") | kw("length_includes_align")
+struct_param = P.Group(struct_param_name - s('=') - word)
+struct_param_list = P.Forward()
+struct_param_list << struct_param + P.Optional(s(',') - P.Optional(struct_param_list))
+
+struct_member = pad_member | type_member | discriminator_member | data_member
+parent = (s(':') - identifier) | tag(None)
+struct = kw('struct') - identifier - P.Group(P.Optional(s('(') - struct_param_list - s(')'))) - parent - s('{') + \
+         P.Group(P.ZeroOrMore(struct_member - s(';'))) + \
          s('}') - s(';')
 
 # Enums
-enum_member = P.Group(identifier + s('=') + integer)
+enum_param_name = kw("wire_type") | kw("bitmask") | kw("complete")
+enum_param = P.Group(enum_param_name  - s('=') - word)
+enum_param_list = P.Forward()
+enum_param_list << enum_param + P.Optional(s(',') + P.Optional(enum_param_list))
+
+enum_member_param_name = kw("virtual")
+enum_member_param = P.Group(enum_member_param_name  - s('=') - word)
+enum_member_param_list = P.Forward()
+enum_member_param_list << enum_member_param + P.Optional(s(',') + P.Optional(enum_member_param_list))
+
+enum_member = P.Group(identifier - P.Group(P.Optional(s('(') - enum_member_param_list - s(')'))) - s('=') + integer)
 enum_list = P.Forward()
 enum_list << enum_member + P.Optional(s(',') + P.Optional(enum_list))
-enum = kw('enum') - identifier - s('{') + \
+enum = kw('enum') - identifier - P.Group(P.Optional(s('(') - enum_param_list - s(')'))) - s('{') + \
          P.Group(P.Optional(enum_list)) + \
          s('}') - s(';')
 
@@ -71,4 +93,11 @@
 grammar.ignore(P.cppStyleComment)
 
 def parse(src):
-    return grammar.parseString(src, parseAll=True)
+    """
+    Given an input string, return the AST.
+
+    The AST is a low-level representation of the input. It changes frequently
+    with the input file syntax. The frontend.py module transforms the AST
+    into the OFInput representation.
+    """
+    return grammar.parseString(src, parseAll=True).asList()
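+
+# Hedged usage sketch (the input snippet is hypothetical but follows the
+# grammar above; the exact nesting of the returned lists is determined by
+# the P.Group/tag combinators):
+#
+#     ast = parse("enum ofp_type(wire_type=uint8_t) { OFPT_HELLO = 0 };")
+#
+# frontend.py then walks these nested lists to build frontend_ir.OFInput
+# objects.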
diff --git a/loxi_globals.py b/loxi_globals.py
new file mode 100644
index 0000000..e8b21bd
--- /dev/null
+++ b/loxi_globals.py
@@ -0,0 +1,73 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+from loxi_ir import *
+from collections import OrderedDict
+
+#######################################################################
+### OFVersion registry
+#######################################################################
+
+class OFVersions:
+    VERSION_1_0 = OFVersion("1.0", 1)
+    VERSION_1_1 = OFVersion("1.1", 2)
+    VERSION_1_2 = OFVersion("1.2", 3)
+    VERSION_1_3 = OFVersion("1.3", 4)
+
+    all_supported = (
+        VERSION_1_0,
+        VERSION_1_1,
+        VERSION_1_2,
+        VERSION_1_3,
+    )
+
+    wire_version_map   = { v.wire_version : v for v in all_supported }
+    version_string_map = { v.version      : v for v in all_supported }
+
+    target_versions = []
+
+    @staticmethod
+    def from_wire(w):
+        return OFVersions.wire_version_map[w]
+
+    @staticmethod
+    def from_string(s):
+        return OFVersions.version_string_map[s]
+
+    @staticmethod
+    def from_strings(*strings):
+        return tuple(OFVersions.version_string_map[s] for s in strings)
+
+
+
+#######################################################################
+### Per-version IR registry
+#######################################################################
+
+# map OFVersion -> OFProtocol
+ir = OrderedDict()
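+
+# Hedged lookup sketch (version strings and wire versions as defined above):
+#
+#     v13 = OFVersions.from_string("1.3")   # the OFVersion with wire_version 4
+#     assert OFVersions.from_wire(4) is v13
+#
+# loxi_globals.ir is later populated with one OFProtocol per target OFVersion.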
diff --git a/loxi_front_end/c_parse_utils.py b/loxi_ir/__init__.py
similarity index 66%
copy from loxi_front_end/c_parse_utils.py
copy to loxi_ir/__init__.py
index 5e8d471..b4f1c05 100644
--- a/loxi_front_end/c_parse_utils.py
+++ b/loxi_ir/__init__.py
@@ -25,27 +25,7 @@
 # EPL for the specific language governing permissions and limitations
 # under the EPL.
 
-##
-# @brief Utilities related to parsing C files
-#
-import of_g
-
-def type_dec_to_count_base(m_type):
-    """
-    Resolve a type declaration like uint8_t[4] to a count (4) and base_type
-    (uint8_t)
-
-    @param m_type The string type declaration to process
-    """
-    count = 1
-    chk_ar = m_type.split('[')
-    if len(chk_ar) > 1:
-        count_str = chk_ar[1].split(']')[0]
-        if count_str in of_g.ofp_constants:
-            count = of_g.ofp_constants[count_str]
-        else:
-            count = int(count_str)
-        base_type = chk_ar[0]
-    else:
-        base_type = m_type
-    return count, base_type
+# Import the model
+from ir import *
+from ir import build_protocol
+from unified import build_unified_ir
diff --git a/loxi_ir/ir.py b/loxi_ir/ir.py
new file mode 100644
index 0000000..6a053c6
--- /dev/null
+++ b/loxi_ir/ir.py
@@ -0,0 +1,479 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+from itertools import chain
+import logging
+import re
+import sys
+
+from collections import namedtuple, OrderedDict
+from generic_utils import find, memoize, OrderedSet
+from loxi_ir import ir_offset
+import loxi_front_end.frontend_ir as frontend_ir
+
+logger = logging.getLogger(__name__)
+
+# This module is intended to be imported like this: from loxi_ir import *
+# All public names are prefixed with 'OF'.
+__all__ = [
+    'OFVersion',
+    'OFProtocol',
+    'OFClass',
+    'OFUnifiedClass',
+    'OFDataMember',
+    'OFTypeMember',
+    'OFDiscriminatorMember',
+    'OFLengthMember',
+    'OFFieldLengthMember',
+    'OFPadMember',
+    'OFEnum',
+    'OFEnumEntry'
+]
+
+"""
+One version of the OpenFlow protocol
+@param version Official dotted version number (e.g., "1.0", "1.3")
+@param wire_version Integer wire version (1 for 1.0, 4 for 1.3)
+"""
+class OFVersion(namedtuple("OFVersion", ("version", "wire_version"))):
+    @property
+    @memoize
+    def constant(self):
+        """ return this version as an uppercase string suitable
+            for use as a C constant, e.g., "VERSION_1_3"
+        """
+        return self.constant_version(prefix="VERSION_")
+
+    @property
+    @memoize
+    def short_constant(self):
+        """ return this version as an uppercase string suitable
+            for use as a C constant prefixed with OF_, e.g., "OF_1_3"
+        """
+        return self.constant_version(prefix="OF_")
+
+    def constant_version(self, prefix="VERSION_"):
+        return prefix + self.version.replace(".", "_")
+
+    def __repr__(self):
+        return "OFVersion(%s)" % self.version
+
+    def __str__(self):
+        return self.version
+
+    def __cmp__(self, other):
+        return cmp(self.wire_version, other.wire_version)
+
+"""
+The classes and enums of one version of the OpenFlow protocol
+
+Combination of multiple OFInput objects.
+
+@param version OFVersion this protocol describes
+@param classes List of OFClass objects
+@param enums List of OFEnum objects
+"""
+class OFProtocol(namedtuple('OFProtocol', ['version', 'classes', 'enums'])):
+    def __init__(self, version, classes, enums):
+        super(OFProtocol, self).__init__(self, version, classes, enums)
+        assert version is None or isinstance(version, OFVersion)
+
+    def class_by_name(self, name):
+        return find(lambda ofclass: ofclass.name == name, self.classes)
+
+    def enum_by_name(self, name):
+        return find(lambda enum: enum.name == name, self.enums)
+
+"""
+An OpenFlow class
+
+All compound objects like messages, actions, instructions, etc. are
+uniformly represented by this class.
+
+The members are in the same order as on the wire.
+
+@param name
+@param superclass OFClass object of this class's superclass (None for root classes)
+@param members List of *Member objects
+@param virtual True if this is an abstract superclass
+@param params Optional dictionary of parameters
+@param is_fixed_length True if all instances of this class have the same length
+@param base_length Length in bytes of the fixed-size portion of the class
+"""
+class OFClass(namedtuple('OFClass', ['name', 'superclass', 'members', 'virtual', 'params', 'is_fixed_length', 'base_length'])):
+    def __init__(self, *a, **kw):
+        super(OFClass, self).__init__(self, *a, **kw)
+        # Back reference will be added by assignment
+        self.protocol = None
+
+    def member_by_name(self, name):
+        return find(lambda m: hasattr(m, "name") and m.name == name, self.members)
+
+    @property
+    def discriminator(self):
+        return find(lambda m: type(m) == OFDiscriminatorMember, self.members)
+
+    def is_instanceof(self, super_class_name):
+        if self.name == super_class_name:
+            return True
+        elif self.superclass is None:
+            return False
+        else:
+            return self.superclass.is_instanceof(super_class_name)
+
+    def is_subclassof(self, super_class_name):
+        return self.name != super_class_name and self.is_instanceof(super_class_name)
+
+    @property
+    def is_message(self):
+        return self.is_instanceof("of_header")
+
+    @property
+    def is_oxm(self):
+        return self.is_instanceof("of_oxm")
+
+    @property
+    def is_action(self):
+        return self.is_instanceof("of_action")
+
+    @property
+    def is_action_id(self):
+        return self.is_instanceof("of_action_id")
+
+    @property
+    def is_instruction(self):
+        return self.is_instanceof("of_instruction")
+
+    def __hash__(self):
+        return hash((self.name, self.protocol.wire_version if self.protocol else None))
+
+    @property
+    def length(self):
+        if self.is_fixed_length:
+            return self.base_length
+        else:
+            raise Exception("Not a fixed length class: {}".format(self.name))
+
+    @property
+    def length_member(self):
+        return find(lambda m: type(m) == OFLengthMember, self.members)
+
+    @property
+    def has_internal_alignment(self):
+        return self.params.get('length_includes_align') == 'True'
+
+    @property
+    def has_external_alignment(self):
+        return self.params.get('length_includes_align') == 'False'
+
+    @property
+    def has_type_members(self):
+        return find(lambda m: isinstance(m, OFTypeMember), self.members) is not None
+
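+# Illustrative use of the predicates above (of_packet_in is a standard
+# OpenFlow message class; the protocol object would come from build_protocol
+# below):
+#
+#     packet_in = protocol.class_by_name("of_packet_in")
+#     packet_in.is_message    # True  -- of_packet_in descends from of_header
+#     packet_in.is_oxm        # False
+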
+""" one class unified across openflow versions. Keeps around a map version->versioned_class """
+class OFUnifiedClass(OFClass):
+    def __new__(cls, version_classes, *a, **kw):
+        return super(OFUnifiedClass, cls).__new__(cls, *a, **kw)
+
+    def __init__(self, version_classes, *a, **kw):
+        super(OFUnifiedClass, self).__init__(*a, **kw)
+        self.version_classes = version_classes
+
+    def class_by_version(self, version):
+        return self.version_classes[version]
+
+
+
+""" A mixin for member classes. Keeps around the back reference of_class (for assignment by
+    build_protocol, and additional methods shared across Members. """
+class MemberMixin(object):
+    def __init__(self, *a, **kw):
+        super(MemberMixin, self).__init__(*a, **kw)
+        # Back reference will be added by assignment in build_protocol below
+        self.of_class = None
+
+    @property
+    def length(self):
+        if self.is_fixed_length:
+            return self.base_length
+        else:
+            raise Exception("Not a fixed length member: {}.{} [{}]".format(
+                self.of_class.name,
+                self.name if hasattr(self, "name") else "(unnamed)",
+                type(self).__name__))
+
+"""
+Normal field
+
+@param name
+@param oftype C-like type string
+
+Example: packet_in.buffer_id
+"""
+class OFDataMember(namedtuple('OFDataMember', ['name', 'oftype', 'is_fixed_length', 'base_length', 'offset']), MemberMixin):
+    pass
+
+"""
+Field that declares that this is an abstract super-class and
+that the sub classes will be discriminated based on this field.
+E.g., 'type' is the discriminator member of the abstract superclass
+of_action.
+
+@param name
+@param oftype C-like type string
+"""
+class OFDiscriminatorMember (namedtuple('OFDiscriminatorMember', ['name', 'oftype', 'is_fixed_length', 'base_length', 'offset']), MemberMixin):
+    pass
+
+"""
+Field used to determine the type of an OpenFlow object
+
+@param name
+@param oftype C-like type string
+@param value Fixed type value
+
+Example: packet_in.type, flow_add._command
+"""
+class OFTypeMember (namedtuple('OFTypeMember', ['name', 'oftype', 'value', 'is_fixed_length', 'base_length', 'offset']), MemberMixin):
+    pass
+
+"""
+Field with the length of the containing object
+
+@param name
+@param oftype C-like type string
+
+Example: packet_in.length, action_output.len
+"""
+class OFLengthMember (namedtuple('OFLengthMember', ['name', 'oftype', 'is_fixed_length', 'base_length', 'offset']), MemberMixin):
+    pass
+
+"""
+Field with the length of another field in the containing object
+
+@param name
+@param oftype C-like type string
+@param field_name Peer field whose length this field contains
+
+Example: packet_out.actions_len (only usage)
+"""
+class OFFieldLengthMember (namedtuple('OFFieldLengthMember', ['name', 'oftype', 'field_name', 'is_fixed_length', 'base_length', 'offset']), MemberMixin):
+    pass
+
+"""
+Zero-filled padding
+
+@param length Length in bytes
+
+Example: packet_in.pad
+"""
+class OFPadMember (namedtuple('OFPadMember', ['pad_length', 'is_fixed_length', 'base_length', 'offset']), MemberMixin):
+    pass
+
+"""
+An OpenFlow enumeration
+
+All values are Python ints.
+
+@param name
+@param entries List of OFEnumEntry objects in input order
+@param params Dict of optional params. Currently defined:
+       - wire_type: the low_level type of the enum values (uint8,...)
+"""
+class OFEnum(namedtuple('OFEnum', ['name', 'entries', 'params'])):
+    def __init__(self, *a, **kw):
+        super(OFEnum, self).__init__(*a, **kw)
+        # Back reference will be added by assignment
+        self.protocol = None
+
+    @property
+    def values(self):
+        return [(e.name, e.value) for e in self.entries]
+
+    @property
+    def is_bitmask(self):
+        return "bitmask" in self.params and self.params['bitmask']
+
+    @property
+    def wire_type(self):
+        return self.params['wire_type'] if 'wire_type' in self.params else self.name
+
+class OFEnumEntry(namedtuple('OFEnumEntry', ['name', 'value', 'params'])):
+    def __init__(self, *a, **kw):
+        super(OFEnumEntry, self).__init__(*a, **kw)
+        # Back reference will be added by assignment
+        self.enum = None
+
+class RedefinedException(Exception):
+    pass
+
+class ClassNotFoundException(Exception):
+    pass
+
+class DependencyCycleException(Exception):
+    pass
+
+def build_protocol(version, ofinputs):
+    name_frontend_classes = OrderedDict()
+    name_frontend_enums = OrderedDict()
+
+    for ofinput in ofinputs:
+        for c in ofinput.classes:
+            name = c.name
+            if name in name_frontend_classes:
+                raise RedefinedException("Error parsing {}. Class {} redefined (already defined in {})"
+                        .format(ofinput.filename, name,
+                            name_frontend_classes[name][1].filename))
+            else:
+                name_frontend_classes[name] = (c, ofinput)
+        for e in ofinput.enums:
+            name = e.name
+            if name in name_frontend_enums:
+                raise RedefinedException("Error parsing {}. Enum {} redefined (already defined in {})"
+                        .format(ofinput.filename, name,
+                            name_frontend_enums[name][1].filename))
+            else:
+                name_frontend_enums[name] = (e, ofinput)
+
+    name_enums = {}
+    for fe, _ in name_frontend_enums.values():
+        entries = tuple(OFEnumEntry(name=e.name, value=e.value,
+                        params=e.params) for e in fe.entries)
+        enum = OFEnum(name=fe.name,
+                      entries=entries,
+                      params=fe.params)
+        for e in entries:
+            e.enum = enum
+        name_enums[enum.name] = enum
+
+    name_classes = OrderedDict()
+    build_touch_classes = OrderedSet()
+
+    def convert_member_properties(props):
+        return { name if name != "length" else "pad_length" : value for name, value in props.items() }
+
+    def build_member(of_class, fe_member, length_info):
+        if isinstance(fe_member, frontend_ir.OFVersionMember):
+            member = OFTypeMember(offset = length_info.offset,
+                                  base_length = length_info.base_length,
+                                  is_fixed_length=length_info.is_fixed_length,
+                                  value = version.wire_version,
+                                  **convert_member_properties(fe_member._asdict()))
+        else:
+            ir_class = globals()[type(fe_member).__name__]
+            member = ir_class(offset = length_info.offset,
+                              base_length = length_info.base_length,
+                              is_fixed_length=length_info.is_fixed_length,
+                              **convert_member_properties(fe_member._asdict()))
+        member.of_class = of_class
+        return member
+
+    def build_class(name):
+        if name in name_classes:
+            return name_classes[name]
+        if name in build_touch_classes:
+            raise DependencyCycleException( "Dependency cycle: {}"
+                    .format(" -> ".join(list(build_touch_classes) + [name])))
+        if not name in name_frontend_classes:
+            raise ClassNotFoundException("Class not found: {}".format(name))
+
+        build_touch_classes.add(name)
+
+        fe, _ = name_frontend_classes[name]
+
+        superclass = build_class(fe.superclass) if fe.superclass else None
+
+        # make sure members on which we depend are built first (for calc_length)
+        for m in fe.members:
+            if not hasattr(m, "oftype"):
+                continue
+            for m_name in re.sub(r'_t$', '', m.oftype), m.oftype:
+                logger.debug("Checking {}".format(m_name))
+                if m_name in name_frontend_classes:
+                    build_class(m_name)
+
+        base_length, is_fixed_length, member_lengths = \
+           ir_offset.calc_lengths(version, fe, name_classes, name_enums)
+
+        members = []
+        c = OFClass(name=fe.name, superclass=superclass,
+                members=members, virtual=fe.virtual, params=fe.params,
+                is_fixed_length=is_fixed_length, base_length=base_length)
+
+        members.extend( build_member(c, fe_member, member_lengths[fe_member])
+                  for fe_member in fe.members)
+
+        name_classes[name] = c
+        build_touch_classes.remove(name)
+        return c
+
+    def build_id_class(orig_name, base_name):
+        name = base_name + '_id' + orig_name[len(base_name):]
+        if name in name_classes:
+            return name_classes[name]
+        orig_fe, _ = name_frontend_classes[orig_name]
+
+        if orig_fe.superclass:
+            superclass_name = base_name + '_id' + orig_fe.superclass[len(base_name):]
+            superclass = build_id_class(orig_fe.superclass, base_name)
+        else:
+            superclass_name = None
+            superclass = None
+
+        fe = frontend_ir.OFClass(
+            name=name,
+            superclass=superclass_name,
+            members=[m for m in orig_fe.members if not isinstance(m, frontend_ir.OFDataMember)],
+            virtual=orig_fe.virtual,
+            params={})
+
+        base_length, is_fixed_length, member_lengths = \
+           ir_offset.calc_lengths(version, fe, name_classes, name_enums)
+        assert fe.virtual or is_fixed_length
+
+        members = []
+        c = OFClass(name=fe.name, superclass=superclass,
+                members=members, virtual=fe.virtual, params=fe.params,
+                is_fixed_length=is_fixed_length, base_length=base_length)
+
+        members.extend( build_member(c, fe_member, member_lengths[fe_member])
+                  for fe_member in fe.members)
+
+        name_classes[name] = c
+        return c
+
+    id_class_roots = ["of_action", "of_instruction"]
+
+    for name in sorted(name_frontend_classes.keys()):
+        c = build_class(name)
+
+        # Build ID classes for OF 1.3+
+        if version.wire_version >= 4:
+            for root in id_class_roots:
+                if c.is_instanceof(root):
+                    build_id_class(name, root)
+
+    protocol = OFProtocol(version=version, classes=tuple(name_classes.values()), enums=tuple(name_enums.values()))
+    for e in chain(protocol.classes, protocol.enums):
+        e.protocol = protocol
+    return protocol
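+
+# Hedged call sketch (the ofinputs list would come from the parser/frontend;
+# the variable names here are illustrative only):
+#
+#     from loxi_globals import OFVersions
+#     protocol = build_protocol(OFVersions.VERSION_1_3, ofinputs)
+#     packet_in = protocol.class_by_name("of_packet_in")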
diff --git a/loxi_ir/ir_offset.py b/loxi_ir/ir_offset.py
new file mode 100644
index 0000000..e705fcc
--- /dev/null
+++ b/loxi_ir/ir_offset.py
@@ -0,0 +1,193 @@
+## List of mixed data types
+#
+# This is a list of data types which require special treatment
+# because the underlying datatype has changed between versions.
+# The main example is port which went from 16 to 32 bits.  We
+# define per-version accessors for these types and those are
+# used in place of the normal ones.
+#
+# The wire protocol number is used to identify versions.  For now,
+# the value is the name of the type to use for that version
+#
+# This is the map between the external type (like of_port_no_t)
+# which is used by customers of this code and the internal
+# datatypes (like uint16_t) that appear on the wire for a
+# particular version.
+#
+from collections import namedtuple
+import logging
+
+import loxi_front_end.frontend_ir as fe
+import loxi_ir.ir
+
+ofp_constants = dict(
+    OF_MAX_TABLE_NAME_LEN = 32,
+    OF_MAX_PORT_NAME_LEN  = 16,
+    OF_ETH_ALEN = 6,
+    OF_DESC_STR_LEN   = 256,
+    OF_SERIAL_NUM_LEN = 32
+)
+
+
+of_mixed_types = dict(
+    of_port_no_t = {
+        1: "uint16_t",
+        2: "uint32_t",
+        3: "uint32_t",
+        4: "uint32_t",
+        "short_name":"port_no"
+        },
+    of_port_desc_t = {
+        1: "of_port_desc_t",
+        2: "of_port_desc_t",
+        3: "of_port_desc_t",
+        4: "of_port_desc_t",
+        "short_name":"port_desc"
+        },
+    of_bsn_vport_t = {
+        1: "of_bsn_vport_t",
+        2: "of_bsn_vport_t",
+        3: "of_bsn_vport_t",
+        4: "of_bsn_vport_t",
+        "short_name":"bsn_vport"
+        },
+    of_fm_cmd_t = { # Flow mod command went from u16 to u8
+        1: "uint16_t",
+        2: "uint8_t",
+        3: "uint8_t",
+        4: "uint8_t",
+        "short_name":"fm_cmd"
+        },
+    of_wc_bmap_t = { # Wildcard bitmap
+        1: "uint32_t",
+        2: "uint32_t",
+        3: "uint64_t",
+        4: "uint64_t",
+        "short_name":"wc_bmap"
+        },
+    of_match_bmap_t = { # Match bitmap
+        1: "uint32_t",
+        2: "uint32_t",
+        3: "uint64_t",
+        4: "uint64_t",
+        "short_name":"match_bmap"
+        },
+    of_match_t = { # Match object
+        1: "of_match_v1_t",
+        2: "of_match_v2_t",
+        3: "of_match_v3_t",
+        4: "of_match_v3_t",  # Currently uses same match as 1.2 (v3).
+        "short_name":"match"
+        },
+)
+
+## basic lengths
+of_base_lengths = dict(
+    char     = (1, True),
+    uint8_t  = (1, True),
+    uint16_t = (2, True),
+    uint32_t = (4, True),
+    uint64_t = (8, True),
+    of_mac_addr_t = (6, True),
+    of_ipv4_t = (4, True),
+    of_ipv6_t = (16, True),
+    of_port_name_t = (ofp_constants["OF_MAX_PORT_NAME_LEN"], True),
+    of_table_name_t = (ofp_constants["OF_MAX_TABLE_NAME_LEN"], True),
+    of_desc_str_t = (ofp_constants["OF_DESC_STR_LEN"], True),
+    of_serial_num_t = (ofp_constants["OF_SERIAL_NUM_LEN"], True),
+    of_match_v1_t = (40, True),
+    of_match_v2_t = (88, True),
+    of_match_v3_t = (8, False),
+    of_octets_t = (0, False),
+    of_bitmap_128_t = (16, True),
+    of_checksum_128_t = (16, True),
+)
+
+def type_dec_to_count_base(m_type):
+    """
+    Resolve a type declaration like uint8_t[4] to a count (4) and base_type
+    (uint8_t)
+
+    @param m_type The string type declaration to process
+    """
+    count = 1
+    chk_ar = m_type.split('[')
+    if len(chk_ar) > 1:
+        count_str = chk_ar[1].split(']')[0]
+        if count_str in ofp_constants:
+            count = ofp_constants[count_str]
+        else:
+            count = int(count_str)
+        base_type = chk_ar[0]
+    else:
+        base_type = m_type
+    return count, base_type
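+
+# For example (the values follow from ofp_constants above):
+#     type_dec_to_count_base("uint8_t[4]")        -> (4, "uint8_t")
+#     type_dec_to_count_base("char[OF_ETH_ALEN]") -> (6, "char")
+#     type_dec_to_count_base("uint32_t")          -> (1, "uint32_t")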
+
+
+LengthInfo = namedtuple("LengthInfo", ("offset", "base_length", "is_fixed_length"))
+
+def calc_lengths(version, fe_class, existing_classes, existing_enums):
+    offset_fixed = True
+    offset = 0
+
+    member_infos = {}
+    for member in fe_class.members:
+        member_offset = offset if offset_fixed else None
+
+        if isinstance(member, fe.OFPadMember):
+            member_base_length = member.length
+            member_fixed_length = True
+        else:
+            m_type = member.oftype
+            name = member.name
+
+            member_base_length = 0
+            if m_type.find("list(") == 0:
+                member_fixed_length = False
+            elif m_type.find("struct") == 0:
+                raise Exception("Error: recursive struct found: {}, {}"
+                                    .format(fe_class.name, name))
+            elif m_type == "octets":
+                member_fixed_length = False
+            else:
+                member_base_length, member_fixed_length = member_length(version, fe_class, member, existing_classes, existing_enums)
+
+        if not member_fixed_length:
+            offset_fixed = False
+
+        member_infos[member] = LengthInfo(member_offset, member_base_length,
+                member_fixed_length)
+        offset += member_base_length
+
+    base_length = offset
+    fixed_length = offset_fixed if not fe_class.virtual else False
+    return (base_length, fixed_length, member_infos)
+
+def member_length(version, fe_class, fe_member, existing_classes, existing_enums):
+    """
+    Return the length of an IR member.
+
+    @return tuple (base_length, length_fixed)
+    """
+    count, base_type = type_dec_to_count_base(fe_member.oftype)
+
+    len_update = 0
+    if base_type in of_mixed_types:
+        base_type = of_mixed_types[base_type][version.wire_version]
+
+    base_class = base_type[:-2]
+    if base_class in existing_classes:
+        member_ir_class = existing_classes[base_class]
+        bytes = member_ir_class.base_length
+        length_fixed = member_ir_class.is_fixed_length
+    else:
+        if base_type in existing_enums:
+            enum = existing_enums[base_type]
+            base_type = enum.wire_type
+
+        if base_type in of_base_lengths:
+            bytes, length_fixed = of_base_lengths[base_type]
+        else:
+            raise Exception("Unknown type for {}.{}: {}".format(fe_class.name, fe_member.name, base_type))
+
+    return (count * bytes), length_fixed
diff --git a/loxi_ir/unified.py b/loxi_ir/unified.py
new file mode 100644
index 0000000..41b86dc
--- /dev/null
+++ b/loxi_ir/unified.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import copy
+from collections import OrderedDict
+from itertools import chain
+import logging
+
+import ir
+
+def build_unified_ir(name_protocol_map):
+    class UnifiedClassSpec(object):
+        def __init__(self, name):
+            self.name = name
+            self.members = OrderedDict()
+            self.superclass_name = None
+            self.superclass_set = False
+            self.params = OrderedDict()
+            self.version_class = OrderedDict()
+            self.virtual = False
+            self.base_length = None
+            self.is_fixed_length = True
+
+        def add_class(self, version, v_class):
+            for v_member in v_class.members:
+                if hasattr(v_member, "name"):
+                    if not v_member.name in self.members:
+                        self.members[v_member.name] = v_member
+                    else:
+                        if not type(self.members[v_member.name]) == type(v_member):
+                            raise Exception("Error unifying ir class {} - adding version: {} - member_type {} <-> {}".format(
+                                    self.name, v_class.protocol.version, self.members[v_member.name], v_member))
+
+            v_superclass_name = v_class.superclass.name if v_class.superclass else None
+            if not self.superclass_set:
+                self.superclass_name = v_superclass_name
+                self.superclass_set = True
+            else:
+                if self.superclass_name != v_superclass_name:
+                    raise Exception("Error unifying ir class {} - adding version {} - superclass: {} <-> {}".format(
+                            self.name, v_class.protocol.version, self.superclass_name, v_superclass_name))
+
+            for name, value in v_class.params.items():
+                if not name in self.params:
+                    self.params[name] = value
+                else:
+                    if self.params[name] != value:
+                        raise Exception("Error unifying ir class {} - adding version: {} - param {} <-> {}".format(
+                                self.name, v_class.protocol.version, self.params[name], value))
+
+            if v_class.virtual:
+                self.virtual = True
+
+            if not v_class.is_fixed_length:
+                self.is_fixed_length = False
+
+            if self.base_length is None:
+                self.base_length = v_class.base_length
+            elif self.base_length != v_class.base_length:
+                self.is_fixed_length = False
+                if self.base_length > v_class.base_length:
+                    self.base_length = v_class.base_length
+            self.version_class[version] = v_class
+
+    class UnifiedEnumSpec(object):
+        def __init__(self, name):
+            self.name = name
+            self.entries = {}
+            self.params = {}
+            self.version_enums = OrderedDict()
+
+        def add_enum(self, version, v_enum):
+            for e in v_enum.entries:
+                if not e.name in self.entries:
+                    self.entries[e.name] = ir.OFEnumEntry(e.name, e.value, copy.copy(e.params))
+                else:
+                    entry = self.entries[e.name]
+                    for name, value in e.params.items():
+                        if not name in entry.params:
+                            entry.params[name] = value
+                        elif entry.params[name] != value:
+                            raise Exception("Error unifying ir enum {} - adding version: param {} <-> {}".format(
+                                self.name, entry.params[name], value))
+            for name, value in v_enum.params.items():
+                if not name in self.params:
+                    self.params[name] = value
+                else:
+                    if self.params[name] != value:
+                        if name == "wire_type":
+                            self.params[name] = None
+                        else:
+                            raise Exception("Error unifying ir enum {} - adding version: {} param {} <-> {}".format(
+                                self.name, v_enum.protocol.version, self.params[name], value))
+
+            self.version_enums[version]=v_enum
+
+    u_name_classes = OrderedDict()
+    u_name_enums = OrderedDict()
+
+    for version, protocol in name_protocol_map.items():
+        assert isinstance(version, ir.OFVersion)
+        for v_class in protocol.classes:
+            name = v_class.name
+            if not name in u_name_classes:
+                u_name_classes[name] = UnifiedClassSpec(name)
+            spec = u_name_classes[name]
+            spec.add_class(version, v_class)
+
+        for v_enum in protocol.enums:
+            name = v_enum.name
+            if not name in u_name_enums:
+                u_name_enums[name] = UnifiedEnumSpec(name)
+            spec = u_name_enums[name]
+            spec.add_enum(version, v_enum)
+
+    unified_enums = tuple(ir.OFEnum(name=s.name, entries=tuple(s.entries.values()), params=s.params) for s in u_name_enums.values())
+    unified_classes = OrderedDict()
+    for name, spec in u_name_classes.items():
+        u = ir.OFUnifiedClass(
+                name = spec.name,
+                version_classes=spec.version_class,
+                superclass=None if not spec.superclass_name else unified_classes[spec.superclass_name],
+                members=spec.members.values(),
+                virtual=spec.virtual,
+                params=spec.params,
+                base_length=spec.base_length,
+                is_fixed_length=spec.is_fixed_length)
+        unified_classes[name] = u
+
+    unified = ir.OFProtocol(version=None, classes = tuple(unified_classes.values()), enums=unified_enums)
+    for e in chain(unified.classes, unified.enums):
+        e.protocol = unified
+    return unified
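+
+# Hedged call sketch (the per-version protocols would normally be built via
+# ir.build_protocol; ofinputs_for() is a hypothetical helper, and
+# target_versions would be e.g. loxi_globals.OFVersions.target_versions):
+#
+#     protocols = OrderedDict(
+#         (v, ir.build_protocol(v, ofinputs_for(v)))
+#         for v in target_versions)
+#     unified = build_unified_ir(protocols)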
diff --git a/loxi_utils/loxi_utils.py b/loxi_utils/loxi_utils.py
index 5508c94..865891f 100644
--- a/loxi_utils/loxi_utils.py
+++ b/loxi_utils/loxi_utils.py
@@ -28,43 +28,16 @@
 """
 @brief Utilities involving LOXI naming conventions
 
-Utility functions for OpenFlow class generation 
+Utility functions for OpenFlow class generation
 
 These may need to be sorted out into language specific functions
 """
 
+import re
 import sys
-import of_g
-import tenjin
 
-def class_signature(members):
-    """
-    Generate a signature string for a class in canonical form
-
-    @param cls The class whose signature is to be generated
-    """
-    return ";".join([",".join([x["m_type"], x["name"], str(x["offset"])])
-                     for x in members])
-
-def type_dec_to_count_base(m_type):
-    """
-    Resolve a type declaration like uint8_t[4] to a count (4) and base_type
-    (uint8_t)
-
-    @param m_type The string type declaration to process
-    """
-    count = 1
-    chk_ar = m_type.split('[')
-    if len(chk_ar) > 1:
-        count_str = chk_ar[1].split(']')[0]
-        if count_str in of_g.ofp_constants:
-            count = of_g.ofp_constants[count_str]
-        else:
-            count = int(count_str)
-        base_type = chk_ar[0]
-    else:
-        base_type = m_type
-    return count, base_type
+import loxi_globals
+from generic_utils import find, memoize
 
 ##
 # Class types:
@@ -88,51 +61,30 @@
 #
 #
 
+class NoneClass(object):
+    def is_instanceof(self, x):
+        return False
+none_item = NoneClass()
+
+def _unified_by_name(cls):
+    c = loxi_globals.unified.class_by_name(cls)
+    return c if c is not None else none_item
+
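+# With the unified IR in place, the class_is_* predicates below reduce to
+# is_instanceof() checks on the unified class, e.g. (illustrative):
+#
+#     class_is_message("of_packet_in")  # True
+#     class_is_message("of_header")     # False, excluded explicitly
+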
+@memoize
 def class_is_message(cls):
     """
     Return True if cls is a message object based on info in unified
     """
-    return "xid" in of_g.unified[cls]["union"] and cls != "of_header"
-
-def class_is_tlv16(cls):
-    """
-    Return True if cls_name is an object which uses uint16 for type and length
-    """
-    if cls.find("of_action") == 0: # Includes of_action_id classes
-        return True
-    if cls.find("of_instruction") == 0:
-        return True
-    if cls.find("of_queue_prop") == 0:
-        return True
-    if cls.find("of_table_feature_prop") == 0:
-        return True
-    # *sigh*
-    if cls.find("of_meter_band_stats") == 0:  # NOT A TLV
+    if cls == "of_header":
         return False
-    if cls.find("of_meter_band") == 0:
-        return True
-    if cls.find("of_hello_elem") == 0:
-        return True
-    if cls == "of_match_v3":
-        return True
-    if cls == "of_match_v4":
-        return True
-    return False
-
-def class_is_u16_len(cls):
-    """
-    Return True if cls_name is an object which uses initial uint16 length
-    """
-    return cls in ["of_group_desc_stats_entry", "of_group_stats_entry",
-                   "of_flow_stats_entry", "of_bucket", "of_table_features"]
+    else:
+        return _unified_by_name(cls).is_instanceof("of_header")
 
 def class_is_oxm(cls):
     """
     Return True if cls_name is an OXM object
     """
-    if cls.find("of_oxm") == 0:
-        return True
-    return False
+    return _unified_by_name(cls).is_instanceof("of_oxm")
 
 def class_is_action(cls):
     """
@@ -143,17 +95,7 @@
     is used to identify a kind of action, it does not indicate the
     type of the object following.
     """
-    if cls.find("of_action_id") == 0:
-        return False
-    if cls.find("of_action") == 0:
-        return True
-
-    # For each vendor, check for vendor specific action
-    for exp in of_g.experimenter_name_to_id:
-        if cls.find("of_action" + exp) == 0:
-            return True
-
-    return False
+    return _unified_by_name(cls).is_instanceof("of_action")
 
 def class_is_action_id(cls):
     """
@@ -164,77 +106,50 @@
     is used to identify a kind of action, it does not indicate the
     type of the object following.
     """
-    if cls.find("of_action_id") == 0:
-        return True
-
-    # For each vendor, check for vendor specific action
-    for exp in of_g.experimenter_name_to_id:
-        if cls.find("of_action_id_" + exp) == 0:
-            return True
-
-    return False
+    return _unified_by_name(cls).is_instanceof("of_action_id")
 
 def class_is_instruction(cls):
     """
     Return True if cls_name is an instruction object
     """
-    if cls.find("of_instruction") == 0:
-        return True
-
-    # For each vendor, check for vendor specific action
-    for exp in of_g.experimenter_name_to_id:
-        if cls.find("of_instruction_" + exp) == 0:
-            return True
-
-    return False
+    return _unified_by_name(cls).is_instanceof("of_instruction")
 
 def class_is_meter_band(cls):
     """
     Return True if cls_name is an instruction object
     """
-    # meter_band_stats is not a member of meter_band class hierarchy
-    if cls.find("of_meter_band_stats") == 0:
-        return False
-    if cls.find("of_meter_band") == 0:
-        return True
-    return False
+    return _unified_by_name(cls).is_instanceof("of_meter_band")
 
 def class_is_hello_elem(cls):
     """
     Return True if cls_name is an instruction object
     """
-    if cls.find("of_hello_elem") == 0:
-        return True
-    return False
+    return _unified_by_name(cls).is_instanceof("of_hello_elem")
 
 def class_is_queue_prop(cls):
     """
     Return True if cls_name is a queue_prop object
     """
-    if cls.find("of_queue_prop") == 0:
-        return True
-
-    # For each vendor, check for vendor specific action
-    for exp in of_g.experimenter_name_to_id:
-        if cls.find("of_queue_prop_" + exp) == 0:
-            return True
-
-    return False
+    return _unified_by_name(cls).is_instanceof("of_queue_prop")
 
 def class_is_table_feature_prop(cls):
     """
     Return True if cls_name is a queue_prop object
     """
-    if cls.find("of_table_feature_prop") == 0:
-        return True
-    return False
+    return _unified_by_name(cls).is_instanceof("of_table_feature_prop")
 
 def class_is_stats_message(cls):
     """
     Return True if cls_name is a message object based on info in unified
     """
+    u = _unified_by_name(cls)
+    return u.is_instanceof("of_stats_request") or u.is_instanceof("of_stats_reply")
 
-    return "stats_type" in of_g.unified[cls]["union"]
+def class_is_bsn_tlv(cls):
+    """
+    Return True if cls_name is a bsn_tlv object
+    """
+    return _unified_by_name(cls).is_instanceof("of_bsn_tlv")
 
 def class_is_list(cls):
     """
@@ -242,295 +157,23 @@
     """
     return (cls.find("of_list_") == 0)
 
+def class_is(cls, cand_name):
+    return _unified_by_name(cls).is_instanceof(cand_name)
+
 def type_is_of_object(m_type):
     """
     Return True if m_type is an OF object type
     """
     # Remove _t from the type id and see if key for unified class
-    if m_type[-2:] == "_t":
-        m_type = m_type[:-2]
-    return m_type in of_g.unified
+    return _unified_by_name(re.sub(r'_t$', '', m_type)) != none_item
 
-def list_to_entry_type(cls):
-    """
-    Return the entry type for a list
-    """
-    slen = len("of_list_")
-    return "of_" + cls[slen:] 
-
-def type_to_short_name(m_type):
-    if m_type in of_g.of_base_types:
-        tname = of_g.of_base_types[m_type]["short_name"]
-    elif m_type in of_g.of_mixed_types:
-        tname = of_g.of_mixed_types[m_type]["short_name"]
+@memoize
+def lookup_ir_wiretype(oftype, version):
+    """ if of is a reference to an enum in ir, resolve it to the wiretype
+        declared in that enum. Else return oftype """
+    enums = loxi_globals.ir[version].enums
+    enum = find(lambda e: e.name == oftype, enums)
+    if enum and 'wire_type' in enum.params:
+        return enum.params['wire_type']
     else:
-        tname = "unknown"
-    return tname
-
-def type_to_name_type(cls, member_name):
-    """
-    Generate the root name of a member for accessor functions, etc
-    @param cls The class name
-    @param member_name The member name
-    """
-    members = of_g.unified[cls]["union"]
-    if not member_name in members:
-        debug("Error:  %s is not in class %s for acc_name defn" %
-              (member_name, cls))
-        os.exit()
-
-    mem = members[member_name]
-    m_type = mem["m_type"]
-    id = mem["memid"]
-    tname = type_to_short_name(m_type)
-
-    return "o%d_m%d_%s" % (of_g.unified[cls]["object_id"], id, tname)
-
-
-def member_to_index(m_name, members):
-    """
-    Given a member name, return the index in the members dict
-    @param m_name The name of the data member to search for
-    @param members The dict of members
-    @return Index if found, -1 not found
-
-    Note we could generate an index when processing the original input
-    """
-    count = 0
-    for d in members:
-        if d["name"] == m_name:
-            return count
-        count += 1
-    return -1
-
-def member_base_type(cls, m_name):
-    """
-    Map a member to its of_ type
-    @param cls The class name
-    @param m_name The name of the member being gotten
-    @return The of_ type of the member
-    """
-    rv = of_g.unified[cls]["union"][m_name]["m_type"]
-    if rv[-2:] == "_t":
-        return rv
-    return rv + "_t"
-
-def member_type_is_octets(cls, m_name):
-    return member_base_type(cls, m_name) == "of_octets_t"
-
-def member_returns_val(cls, m_name):
-    """
-    Should get accessor return a value rather than void
-    @param cls The class name
-    @param m_name The member name
-    @return True if of_g config and the specific member allow a 
-    return value.  Otherwise False
-    """
-    m_type = of_g.unified[cls]["union"][m_name]["m_type"]
-    return (config_check("get_returns") =="value" and 
-            m_type in of_g.of_scalar_types)
-
-def config_check(str, dictionary = of_g.code_gen_config):
-    """
-    Return config value if in dictionary; else return False.
-    @param str The lookup index
-    @param dictionary The dict to check; use code_gen_config if None
-    """
-
-    if str in dictionary:
-        return dictionary[str]
-
-    return False
-
-def h_file_to_define(name):
-    """
-    Convert a .h file name to the define used for the header
-    """
-    h_name = name[:-2].upper()
-    h_name = "_" + h_name + "_H_"
-    return h_name
-
-def type_to_cof_type(m_type):
-    if m_type in of_g.of_base_types:
-        if "cof_type" in of_g.of_base_types[m_type]:
-            return of_g.of_base_types[m_type]["cof_type"]
-    return m_type
-
-            
-def member_is_scalar(cls, m_name):
-    return of_g.unified[cls]["union"][m_name]["m_type"] in of_g.of_scalar_types
-
-def type_is_scalar(m_type):
-    return m_type in of_g.of_scalar_types
-
-def skip_member_name(name):
-    return name.find("pad") == 0 or name in of_g.skip_members
-
-def enum_name(cls):
-    """
-    Return the name used for an enum identifier for the given class
-    @param cls The class name
-    """
-    return cls.upper()
-
-def class_in_version(cls, ver):
-    """
-    Return boolean indicating if cls is defined for wire version ver
-    """
-
-    return (cls, ver) in of_g.base_length
-
-def instance_to_class(instance, parent):
-    """
-    Return the name of the class for an instance of inheritance type parent
-    """
-    return parent + "_" + instance
-
-def sub_class_to_var_name(cls):
-    """
-    Given a subclass name like of_action_output, generate the
-    name of a variable like 'output'
-    @param cls The class name
-    """
-    pass
-
-def class_is_var_len(cls, version):
-    # Match is special case.  Only version 1.2 (wire version 3) is var
-    if cls == "of_match":
-        return version == 3
-
-    return not (cls, version) in of_g.is_fixed_length
-
-def base_type_to_length(base_type, version):
-    if base_type + "_t" in of_g.of_base_types:
-        inst_len = of_g.of_base_types[base_type + "_t"]["bytes"]
-    else:
-        inst_len = of_g.base_length[(base_type, version)]
-
-def version_to_name(version):
-    """
-    Convert an integer version to the C macro name
-    """
-    return "OF_" + of_g.version_names[version]
-
-##
-# Is class a flow modify of some sort?
-
-def cls_is_flow_mod(cls):
-    return cls in ["of_flow_modify", "of_flow_add", "of_flow_delete",
-                   "of_flow_modify_strict", "of_flow_delete_strict"]
-
-
-def all_member_types_get(cls, version):
-    """
-    Get the members and list of types for members of a given class
-    @param cls The class name to process
-    @param version The version for the class
-    """
-    member_types = []
-
-    if not version in of_g.unified[cls]:
-        return ([], [])
-
-    if "use_version" in of_g.unified[cls][version]:
-        v = of_g.unified[cls][version]["use_version"]
-        members = of_g.unified[cls][v]["members"]
-    else:
-        members = of_g.unified[cls][version]["members"]
-    # Accumulate variables that are supported
-    for member in members:
-        m_type = member["m_type"]
-        m_name = member["name"]
-        if skip_member_name(m_name):
-            continue
-        if not m_type in member_types:
-            member_types.append(m_type)
-
-    return (members, member_types)
-
-def list_name_extract(list_type):
-    """
-    Return the base name for a list object of the given type
-    @param list_type The type of the list as appears in the input,
-    for example list(of_port_desc_t).
-    @return A pair, (list-name, base-type) where list-name is the
-    base name for the list, for example of_list_port_desc, and base-type
-    is the type of list elements like of_port_desc_t
-    """
-    base_type = list_type[5:-1]
-    list_name = base_type
-    if list_name.find("of_") == 0:
-        list_name = list_name[3:]
-    if list_name[-2:] == "_t":
-        list_name = list_name[:-2]
-    list_name = "of_list_" + list_name
-    return (list_name, base_type)
-
-def version_to_name(version):
-    """
-    Convert an integer version to the C macro name
-    """
-    return "OF_" + of_g.version_names[version]
-
-def gen_c_copy_license(out):
-    """
-    Generate the top comments for copyright and license
-    """
-    out.write("""\
-/* Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University */
-/* Copyright (c) 2011, 2012 Open Networking Foundation */
-/* Copyright (c) 2012, 2013 Big Switch Networks, Inc. */
-
-""")
-
-def accessor_returns_error(a_type, m_type):
-    is_var_len = (not type_is_scalar(m_type)) and \
-        [x for x in of_g.of_version_range if class_is_var_len(m_type[:-2], x)] != []
-    if a_type == "set" and is_var_len:
-        return True
-    elif m_type == "of_match_t":
-        return True
-    else:
-        return False
-
-def render_template(out, name, path, context):
-    """
-    Render a template using tenjin.
-    out: a file-like object
-    name: name of the template
-    path: array of directories to search for the template
-    context: dictionary of variables to pass to the template
-    """
-    pp = [ tenjin.PrefixedLinePreprocessor() ] # support "::" syntax
-    template_globals = { "to_str": str, "escape": str } # disable HTML escaping
-    engine = TemplateEngine(path=path, pp=pp)
-    out.write(engine.render(name, context, template_globals))
-
-def render_static(out, name, path):
-    """
-    Write out a static template.
-    out: a file-like object
-    name: name of the template
-    path: array of directories to search for the template
-    """
-    # Reuse the tenjin logic for finding the template
-    template_filename = tenjin.FileSystemLoader().find(name, path)
-    if not template_filename:
-        raise ValueError("template %s not found" % name)
-    with open(template_filename) as infile:
-        out.write(infile.read())
-
-class TemplateEngine(tenjin.Engine):
-    def include(self, template_name, **kwargs):
-        """
-        Tenjin has an issue with nested includes that use the same local variable
-        names, because it uses the same context dict for each level of nesting.
-        The fix is to copy the context.
-        """
-        frame = sys._getframe(1)
-        locals  = frame.f_locals
-        globals = frame.f_globals
-        context = locals["_context"].copy()
-        context.update(kwargs)
-        template = self.get_template(template_name, context, globals)
-        return template.render(context, globals, _buf=locals["_buf"])
+        return oftype
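To make the intent of lookup_ir_wiretype concrete, here is a minimal, self-contained sketch of the same lookup over a toy enum table (the Enum stand-in and the sample data are hypothetical; the real function walks loxi_globals.ir[version].enums):

from collections import namedtuple

# Hypothetical stand-in for the IR enum objects; only the fields used here.
Enum = namedtuple("Enum", ["name", "params"])

SAMPLE_ENUMS = [
    Enum(name="ofp_port_no", params={"wire_type": "uint32_t"}),
    Enum(name="ofp_type", params={}),  # declares no wire_type
]

def lookup_wiretype_sketch(oftype, enums=SAMPLE_ENUMS):
    # If oftype names an enum that declares a wire_type, return that
    # wire type; otherwise pass oftype through unchanged.
    enum = next((e for e in enums if e.name == oftype), None)
    if enum and "wire_type" in enum.params:
        return enum.params["wire_type"]
    return oftype

assert lookup_wiretype_sketch("ofp_port_no") == "uint32_t"  # resolved
assert lookup_wiretype_sketch("uint8_t") == "uint8_t"       # passed through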
diff --git a/loxigen.py b/loxigen.py
index 84afe76..700168a 100755
--- a/loxigen.py
+++ b/loxigen.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 # Copyright 2013, Big Switch Networks, Inc.
 #
 # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
@@ -48,7 +48,7 @@
 These are taken from current versions of openflow.h but are modified
 a bit.  See Overview for more information.
 
-Class canonical form:   A list of entries, each of which is a 
+Class canonical form:   A list of entries, each of which is a
 pair "type, name;".  The exception is when type is the keyword
 'list' in which the syntax is "list(type) name;".
 
@@ -60,8 +60,8 @@
 
 @fixme Clean up the lang module architecture.  It should provide a
 list of files that it wants to generate and maps to the filenames,
-subdirectory names and generation functions.  It should also be 
-defined as a class, probably with the constructor taking the 
+subdirectory names and generation functions.  It should also be
+defined as a class, probably with the constructor taking the
 language target.
 
 @fixme Clean up global data structures such as versions and of_g
@@ -69,369 +69,55 @@
 
 """
 
-import sys
-
+from collections import OrderedDict, defaultdict
+import copy
+import glob
+import logging
+from optparse import OptionParser
+import os
 import re
 import string
-import os
-import glob
-import copy
-import of_g
-import loxi_front_end.oxm as oxm
-import loxi_front_end.type_maps as type_maps
+import sys
+
+import cmdline
+from loxi_globals import OFVersions
+import loxi_globals
 import loxi_utils.loxi_utils as loxi_utils
-import loxi_front_end.c_parse_utils as c_parse_utils
-import loxi_front_end.identifiers as identifiers
 import pyparsing
 import loxi_front_end.parser as parser
-import loxi_front_end.translation as translation
-
+import loxi_front_end.frontend as frontend
+import loxi_ir
 from generic_utils import *
 
 root_dir = os.path.dirname(os.path.realpath(__file__))
 
-# TODO:  Put these in a class so they get documented
-
-## Dict indexed by version giving all info related to version
-#
-# This is local; after processing, the information is stored in
-# of_g variables.
-versions = {}
-
-def config_sanity_check():
-    """
-    Check the configuration for basic consistency
-
-    @fixme Needs update for generic language support
-    """
-
-    rv = True
-    # For now, only "error" supported for get returns
-    if config_check("copy_semantics") != "read":
-        debug("Only 'read' is supported for copy_semantics");
-        rv = False        
-    if config_check("get_returns") != "error":
-        debug("Only 'error' is supported for get-accessor return types\m");
-        rv = False        
-    if not config_check("use_fn_ptrs") and not config_check("gen_unified_fns"):
-        debug("Must have gen_fn_ptrs and/or gen_unified_fns set in config")
-        rv = False
-    if config_check("use_obj_id"):
-        debug("use_obj_id is set but not yet supported (change \
-config_sanity_check if it is)")
-        rv = False
-    if config_check("gen_unified_macros") and config_check("gen_unified_fns") \
-            and config_check("gen_unified_macro_lower"):
-        debug("Conflict: Cannot generate unified functions and lower case \
-unified macros")
-        rv = False
-        
-    return rv
-
-def add_class(wire_version, cls, members):
-    """
-    Process a class for the given version and update the unified 
-    list of classes as needed.
-
-    @param wire_version The wire version for this class defn
-    @param cls The name of the class being added
-    @param members The list of members with offsets calculated
-    """
-    memid = 0
-
-    sig = loxi_utils.class_signature(members)
-    if cls in of_g.unified:
-        uc = of_g.unified[cls]
-        if wire_version in uc:
-            debug("Error adding %s to unified. Wire ver %d exists" %
-                  (cls, wire_version))
-            sys.exit(1)
-        uc[wire_version] = {}
-        # Check for a matching signature
-        for wver in uc:
-            if type(wver) != type(0): continue
-            if wver == wire_version: continue
-            if not "use_version" in uc[wver]:
-                if sig == loxi_utils.class_signature(uc[wver]["members"]):
-                    log("Matched %s, ver %d to ver %d" % 
-                          (cls, wire_version, wver))
-                    # have a match with existing version
-                    uc[wire_version]["use_version"] = wver
-                    # What else to do?
-                    return
-    else:  # Haven't seen this entry before
-        log("Adding %s to unified list, ver %d" % (cls, wire_version))
-        of_g.unified[cls] = dict(union={})
-        uc = of_g.unified[cls]
-
-    # At this point, need to add members for this version
-    uc[wire_version] = dict(members = members)
-
-    # Per member processing:
-    #  Add to union list (I'm sure there's a better way)
-    #  Check if it's a list
-    union = uc["union"]
-    if not cls in of_g.ordered_members:
-        of_g.ordered_members[cls] = []
-    for member in members:
-        m_name = member["name"]
-        m_type = member["m_type"]
-        if m_name.find("pad") == 0:
-            continue
-        if m_name in union:
-            if not m_type == union[m_name]["m_type"]:
-                debug("ERROR:   CLASS: %s. VERSION %d. MEMBER: %s. TYPE: %s" %
-                      (cls, wire_version, m_name, m_type))
-                debug("    Type conflict adding member to unified set.")
-                debug("    Current union[%s]:" % m_name)
-                debug(union[m_name])
-                sys.exit(1)
-        else:
-            union[m_name] = dict(m_type=m_type, memid=memid)
-            memid += 1
-        if not m_name in of_g.ordered_members[cls]:
-            of_g.ordered_members[cls].append(m_name)
-
-def update_offset(cls, wire_version, name, offset, m_type):
-    """
-    Update (and return) the offset based on type.
-    @param cls The parent class
-    @param wire_version The wire version being processed
-    @param name The name of the data member
-    @param offset The current offset
-    @param m_type The type declaration being processed
-    @returns A pair (next_offset, len_update)  next_offset is the new offset
-    of the next object or -1 if this is a var-length object.  len_update
-    is the increment that should be added to the length.  Note that (for
-    of_match_v3) it is variable length, but it adds 8 bytes to the fixed
-    length of the object
-    If offset is already -1, do not update
-    Otherwise map to base type and count and update (if possible)
-    """
-    if offset < 0:    # Don't update offset once set to -1
-        return offset, 0
-
-    count, base_type = c_parse_utils.type_dec_to_count_base(m_type)
-
-    len_update = 0
-    if base_type in of_g.of_mixed_types:
-        base_type = of_g.of_mixed_types[base_type][wire_version]
-
-    base_class = base_type[:-2]
-    if (base_class, wire_version) in of_g.is_fixed_length:
-        bytes = of_g.base_length[(base_class, wire_version)]
-    else:
-        if base_type == "of_match_v3_t":
-            # This is a special case: it has non-zero min length
-            # but is variable length
-            bytes = -1
-            len_update = 8
-        elif base_type in of_g.of_base_types:
-            bytes = of_g.of_base_types[base_type]["bytes"]
-        else:
-            print "UNKNOWN TYPE for %s %s: %s" % (cls, name, base_type)
-            log("UNKNOWN TYPE for %s %s: %s" % (cls, name, base_type))
-            bytes = -1
-
-    # If bytes
-    if bytes > 0:
-        len_update = count * bytes
-
-    if bytes == -1:
-        return -1, len_update
-
-    return offset + (count * bytes), len_update
-
-def calculate_offsets_and_lengths(ordered_classes, classes, wire_version):
-    """
-    Generate the offsets for fixed offset class members
-    Also calculate the class_sizes when possible.
-
-    @param classes The classes to process
-    @param wire_version The wire version for this set of classes
-
-    Updates global variables
-    """
-
-    lists = set()
-
-    # Generate offsets
-    for cls in ordered_classes:
-        fixed_offset = 0 # The last "good" offset seen
-        offset = 0
-        last_offset = 0
-        last_name = "-"
-        for member in classes[cls]:
-            m_type = member["m_type"]
-            name = member["name"]
-            if last_offset == -1:
-                if name == "pad":
-                    log("Skipping pad for special offset for %s" % cls)
-                else:
-                    log("SPECIAL OFS: Member %s (prev %s), class %s ver %d" % 
-                          (name, last_name, cls, wire_version))
-                    if (((cls, name) in of_g.special_offsets) and
-                        (of_g.special_offsets[(cls, name)] != last_name)):
-                        debug("ERROR: special offset prev name changed")
-                        debug("  cls %s. name %s. version %d. was %s. now %s" %
-                              cls, name, wire_version, 
-                              of_g.special_offsets[(cls, name)], last_name)
-                        sys.exit(1)
-                    of_g.special_offsets[(cls, name)] = last_name
-
-            member["offset"] = offset
-            if m_type.find("list(") == 0:
-                (list_name, base_type) = loxi_utils.list_name_extract(m_type)
-                lists.add(list_name)
-                member["m_type"] = list_name + "_t"
-                offset = -1
-            elif m_type.find("struct") == 0:
-                debug("ERROR found struct: %s.%s " % (cls, name))
-                sys.exit(1)
-            elif m_type == "octets":
-                log("offset gen skipping octets: %s.%s " % (cls, name))
-                offset = -1
-            else:
-                offset, len_update = update_offset(cls, wire_version, name, 
-                                                  offset, m_type)
-                if offset != -1:
-                    fixed_offset = offset
-                else:
-                    fixed_offset += len_update
-                    log("offset is -1 for %s.%s version %d " % 
-                        (cls, name, wire_version))
-            last_offset = offset
-            last_name = name
-        of_g.base_length[(cls, wire_version)] = fixed_offset
-        if (offset != -1):
-            of_g.is_fixed_length.add((cls, wire_version))
-    for list_type in lists:
-        classes[list_type] = []
-        of_g.ordered_classes[wire_version].append(list_type)
-        of_g.base_length[(list_type, wire_version)] = 0
-
 def process_input_file(filename):
     """
     Process an input file
 
+    Does not modify global state.
+
     @param filename The input filename
 
-    @returns (wire_version, classes), where wire_version is the integer wire
-    protocol number and classes is the dict of all classes processed from the
-    file.
+    @returns An OFInput object
     """
 
     # Parse the input file
     try:
-        ast = parser.parse(open(filename, 'r').read())
+        with open(filename, 'r') as f:
+            ast = parser.parse(f.read())
     except pyparsing.ParseBaseException as e:
         print "Parse error in %s: %s" % (os.path.basename(filename), str(e))
         sys.exit(1)
 
-    ofinput = of_g.OFInput()
-
-    # Now for each structure, generate lists for each member
-    for s in ast:
-        if s[0] == 'struct':
-            name = s[1].replace("ofp_", "of_", 1)
-            members = [dict(m_type=x[0], name=x[1]) for x in s[2]]
-            ofinput.classes[name] = members
-            ofinput.ordered_classes.append(name)
-            if name in type_maps.inheritance_map:
-                # Clone class into header class and add to list
-                ofinput.classes[name + "_header"] = members[:]
-                ofinput.ordered_classes.append(name + "_header")
-        if s[0] == 'enum':
-            name = s[1]
-            members = s[2]
-            ofinput.enums[name] = [(x[0], x[1]) for x in members]
-        elif s[0] == 'metadata':
-            if s[1] == 'version':
-                log("Found version: wire version " + s[2])
-                if s[2] == 'any':
-                    ofinput.wire_versions.update(of_g.wire_ver_map.keys())
-                elif int(s[2]) in of_g.supported_wire_protos:
-                    ofinput.wire_versions.add(int(s[2]))
-                else:
-                    debug("Unrecognized wire protocol version")
-                    sys.exit(1)
-                found_wire_version = True
-
-    if not ofinput.wire_versions:
-        debug("Missing #version metadata")
+    # Create the OFInput from the AST
+    try:
+        ofinput = frontend.create_ofinput(os.path.basename(filename), ast)
+    except frontend.InputError as e:
+        print "Error in %s: %s" % (os.path.basename(filename), str(e))
         sys.exit(1)
 
     return ofinput
 
-def order_and_assign_object_ids():
-    """
-    Order all classes and assign object ids to all classes.
-
-    This is done to promote a reasonable order of the objects, putting
-    messages first followed by non-messages.  No assumptions should be
-    made about the order, nor about contiguous numbering.  However, the
-    numbers should all be reasonably small allowing arrays indexed by 
-    these enum values to be defined.
-    """
-
-    # Generate separate message and non-message ordered lists
-    for cls in of_g.unified:
-        if loxi_utils.class_is_message(cls):
-            of_g.ordered_messages.append(cls)
-        elif loxi_utils.class_is_list(cls):
-            of_g.ordered_list_objects.append(cls)
-        else:
-            of_g.ordered_non_messages.append(cls)
-
-    of_g.ordered_pseudo_objects.append("of_stats_request")
-    of_g.ordered_pseudo_objects.append("of_stats_reply")
-    of_g.ordered_pseudo_objects.append("of_flow_mod")
-
-    of_g.ordered_messages.sort()
-    of_g.ordered_pseudo_objects.sort()
-    of_g.ordered_non_messages.sort()
-    of_g.ordered_list_objects.sort()
-    of_g.standard_class_order.extend(of_g.ordered_messages)
-    of_g.standard_class_order.extend(of_g.ordered_non_messages)
-    of_g.standard_class_order.extend(of_g.ordered_list_objects)
-
-    # This includes pseudo classes for which most code is not generated
-    of_g.all_class_order.extend(of_g.ordered_messages)
-    of_g.all_class_order.extend(of_g.ordered_non_messages)
-    of_g.all_class_order.extend(of_g.ordered_list_objects)
-    of_g.all_class_order.extend(of_g.ordered_pseudo_objects)
-
-    # Assign object IDs
-    for cls in of_g.ordered_messages:
-        of_g.unified[cls]["object_id"] = of_g.object_id
-        of_g.object_id += 1
-    for cls in of_g.ordered_non_messages:
-        of_g.unified[cls]["object_id"] = of_g.object_id
-        of_g.object_id += 1
-    for cls in of_g.ordered_list_objects:
-        of_g.unified[cls]["object_id"] = of_g.object_id
-        of_g.object_id += 1
-    for cls in of_g.ordered_pseudo_objects:
-        of_g.unified[cls] = {}
-        of_g.unified[cls]["object_id"] = of_g.object_id
-        of_g.object_id += 1
-
-
-def initialize_versions():
-    """
-    Create an empty datastructure for each target version.
-    """
-
-    for wire_version in of_g.target_version_list:
-        version_name = of_g.of_version_wire2name[wire_version]
-        of_g.wire_ver_map[wire_version] = version_name
-        versions[version_name] = dict(
-            version_name = version_name,
-            wire_version = wire_version,
-            classes = {})
-        of_g.ordered_classes[wire_version] = []
-
-
 def read_input():
     """
     Read in from files given on command line and update global state
@@ -439,150 +125,62 @@
     @fixme Should select versions to support from command line
     """
 
+    ofinputs_by_version = defaultdict(list)
     filenames = sorted(glob.glob("%s/openflow_input/*" % root_dir))
 
+    # Ignore emacs backup files
+    filenames = [x for x in filenames if not x.endswith('~')]
+
+    # Read input files
+    all_ofinputs = []
     for filename in filenames:
         log("Processing struct file: " + filename)
         ofinput = process_input_file(filename)
 
-        # Populate global state
         for wire_version in ofinput.wire_versions:
-            version_name = of_g.of_version_wire2name[wire_version]
-            versions[version_name]['classes'].update(copy.deepcopy(ofinput.classes))
-            of_g.ordered_classes[wire_version].extend(ofinput.ordered_classes)
+            ofinputs_by_version[wire_version].append(ofinput)
+    return ofinputs_by_version
 
-            for enum_name, members in ofinput.enums.items():
-                for member_name, value in members:
-                    identifiers.add_identifier(
-                        translation.loxi_name(member_name),
-                        member_name, enum_name, value, wire_version,
-                        of_g.identifiers, of_g.identifiers_by_group)
+def build_ir(ofinputs_by_version):
+    classes = []
+    enums = []
+    for wire_version, ofinputs in ofinputs_by_version.items():
+        version = OFVersions.from_wire(wire_version)
+        ofprotocol = loxi_ir.build_protocol(version, ofinputs)
+        loxi_globals.ir[version] = ofprotocol
 
-def add_extra_classes():
-    """
-    Add classes that are generated by Python code instead of from the
-    input files.
-    """
+    loxi_globals.unified = loxi_ir.build_unified_ir(loxi_globals.ir)
 
-    for wire_version in [of_g.VERSION_1_2, of_g.VERSION_1_3]:
-        version_name = of_g.of_version_wire2name[wire_version]
-        oxm.add_oxm_classes_1_2(versions[version_name]['classes'], wire_version)
-
-def analyze_input():
-    """
-    Add information computed from the input, including offsets and
-    lengths of struct members and the set of list and action_id types.
-    """
-
-    # Generate action_id classes for OF 1.3
-    for wire_version, ordered_classes in of_g.ordered_classes.items():
-        if not wire_version in [of_g.VERSION_1_3]:
-            continue
-        classes = versions[of_g.of_version_wire2name[wire_version]]['classes']
-        for cls in ordered_classes:
-            if not loxi_utils.class_is_action(cls):
-                continue
-            action = cls[10:]
-            if action == '' or action == 'header':
-                continue
-            name = "of_action_id_" + action
-            members = classes["of_action"][:]
-            of_g.ordered_classes[wire_version].append(name)
-            if type_maps.action_id_is_extension(name, wire_version):
-                # Copy the base action classes thru subtype
-                members = classes["of_action_" + action][:4]
-            classes[name] = members
-
-    # @fixme If we support extended actions in OF 1.3, need to add IDs
-    # for them here
-
-    for wire_version in of_g.wire_ver_map.keys():
-        version_name = of_g.of_version_wire2name[wire_version]
-        calculate_offsets_and_lengths(
-            of_g.ordered_classes[wire_version],
-            versions[version_name]['classes'],
-            wire_version)
-
-def unify_input():
-    """
-    Create Unified View of Objects
-    """
-
-    global versions
-
-    # Add classes to unified in wire-format order so that it is easier 
-    # to generate things later
-    keys = versions.keys()
-    keys.sort(reverse=True)
-    for version in keys:
-        wire_version = versions[version]["wire_version"]
-        classes = versions[version]["classes"]
-        for cls in of_g.ordered_classes[wire_version]:
-            add_class(wire_version, cls, classes[cls])
-
-
-def log_all_class_info():
-    """
-    Log the results of processing the input
-
-    Debug function
-    """
-
-    for cls in of_g.unified:
-        for v in of_g.unified[cls]:
-            if type(v) == type(0):
-                log("cls: %s. ver: %d. base len %d. %s" %
-                    (str(cls), v, of_g.base_length[(cls, v)],
-                     loxi_utils.class_is_var_len(cls,v) and "not fixed"
-                     or "fixed"))
-                if "use_version" in of_g.unified[cls][v]:
-                    log("cls %s: v %d mapped to %d" % (str(cls), v, 
-                           of_g.unified[cls][v]["use_version"]))
-                if "members" in of_g.unified[cls][v]:
-                    for member in of_g.unified[cls][v]["members"]:
-                        log("   %-20s: type %-20s. offset %3d" %
-                            (member["name"], member["m_type"],
-                             member["offset"]))
-
-def generate_all_files():
-    """
-    Create the files for the language target
-    """
-    for (name, fn) in lang_module.targets.items():
-        path = of_g.options.install_dir + '/' + name
-        os.system("mkdir -p %s" % os.path.dirname(path))
-        with open(path, "w") as outfile:
-            fn(outfile, os.path.basename(name))
-        print("Wrote contents for " + name)
+################################################################
+#
+# Main
+#
+################################################################
 
 if __name__ == '__main__':
-    of_g.loxigen_log_file = open("loxigen.log", "w")
-    of_g.loxigen_dbg_file = sys.stdout
-
-    of_g.process_commandline()
+    (options, args, target_versions) = cmdline.process_commandline()
     # @fixme Use command line params to select log
 
-    if not config_sanity_check():
+    logging.basicConfig(level = logging.INFO if not options.verbose else logging.DEBUG)
+
+    # Import the language file
+    lang_file = "lang_%s" % options.lang
+    lang_module = __import__(lang_file)
+
+    if hasattr(lang_module, "config_sanity_check") and not lang_module.config_sanity_check():
         debug("Config sanity check failed\n")
         sys.exit(1)
 
-    # Import the language file
-    lang_file = "lang_%s" % of_g.options.lang
-    lang_module = __import__(lang_file)
-
     # If list files, just list auto-gen files to stdout and exit
-    if of_g.options.list_files:
+    if options.list_files:
         for name in lang_module.targets:
-            print of_g.options.install_dir + '/' + name
+            print options.install_dir + '/' + name
         sys.exit(0)
 
-    log("\nGenerating files for target language %s\n" % of_g.options.lang)
+    log("\nGenerating files for target language %s\n" % options.lang)
 
-    initialize_versions()
-    read_input()
-    add_extra_classes()
-    analyze_input()
-    unify_input()
-    order_and_assign_object_ids()
-    log_all_class_info()
-    generate_all_files()
+    loxi_globals.OFVersions.target_versions = target_versions
+    inputs = read_input()
+    build_ir(inputs)
+    #log_all_class_info()
+    lang_module.generate(options.install_dir)
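The rewritten driver above only expects three things from a language backend module: an optional config_sanity_check(), a targets collection naming the files it generates (used by the list_files option), and a generate(install_dir) entry point. A sketch of a hypothetical minimal backend (lang_dummy.py is illustrative only, not part of this change):

# lang_dummy.py -- hypothetical minimal backend, for illustration only.
import os

# Names of the files this backend produces, relative to the install dir.
targets = ["dummy/README"]

def config_sanity_check():
    # Nothing to validate for this toy backend.
    return True

def generate(install_dir):
    # Emit one trivial artifact per target.
    for name in targets:
        path = os.path.join(install_dir, name)
        if not os.path.isdir(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        with open(path, "w") as out:
            out.write("generated by lang_dummy\n")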
diff --git a/openflow_input/bsn b/openflow_input/bsn
new file mode 100644
index 0000000..dfbf981
--- /dev/null
+++ b/openflow_input/bsn
@@ -0,0 +1,71 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+
+#version any
+
+// BSN extension message
+struct of_bsn_header : of_experimenter {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == ?;
+};
+
+// BSN extension action
+struct of_action_bsn : of_action_experimenter {
+    uint16_t type == 65535;
+    uint16_t len;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == ?;
+    pad(4);
+};
+
+struct of_bsn_stats_request : of_experimenter_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == ?;
+};
+
+struct of_bsn_stats_reply : of_experimenter_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == ?;
+};
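All of the BSN extension messages defined in this file share the of_bsn_header framing: a standard 8-byte OpenFlow header with message type 4 (experimenter/vendor), then the BSN experimenter id 0x5c16c7 and a 32-bit subtype. A sketch of the wire packing, assuming network byte order as elsewhere in OpenFlow (pack_bsn_header is an illustrative helper, not generated code):

import struct

BSN_EXPERIMENTER = 0x5c16c7
OFPT_EXPERIMENTER = 4

def pack_bsn_header(version, xid, subtype, payload=b""):
    # version, type, length, xid, experimenter, subtype -- 16 bytes total,
    # big-endian, followed by the subtype-specific payload.
    length = 16 + len(payload)
    return struct.pack("!BBHIII", version, OFPT_EXPERIMENTER, length,
                       xid, BSN_EXPERIMENTER, subtype) + payload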
diff --git a/openflow_input/bsn-1.3 b/openflow_input/bsn-1.3
new file mode 100644
index 0000000..f9340b4
--- /dev/null
+++ b/openflow_input/bsn-1.3
@@ -0,0 +1,37 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+
+#version 4
+
+// BSN extension instruction
+struct of_instruction_bsn : of_instruction_experimenter {
+    uint16_t type == 65535;
+    uint16_t len;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == ?;
+    pad(4);
+};
diff --git a/openflow_input/bsn_arp_idle b/openflow_input/bsn_arp_idle
new file mode 100644
index 0000000..26cea92
--- /dev/null
+++ b/openflow_input/bsn_arp_idle
@@ -0,0 +1,50 @@
+// Copyright 2014, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+// This asynchronous message is used by the switch to notify the controller
+// when an entry in the ARP table has been idle for longer than its configured
+// timeout. Similar to the flow idle notification (see bsn_flow_idle), the
+// notification will be resent every timeout interval, and the switch will
+// not remove the table entry on its own.
+
+#version 4
+
+struct of_bsn_arp_idle : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 60;
+    uint16_t vlan_vid;
+    pad(2);
+    of_ipv4_t ipv4_addr;
+};
+
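A decoding sketch for the fixed-size of_bsn_arp_idle body above (16-byte BSN header, then vlan_vid, 2 pad bytes, and the IPv4 address; the helper name is illustrative):

import socket
import struct

def parse_bsn_arp_idle(buf):
    # Assumes buf has already been identified as experimenter 0x5c16c7,
    # subtype 60, and is at least 24 bytes long.
    (version, msg_type, length, xid,
     experimenter, subtype, vlan_vid) = struct.unpack_from("!BBHIIIH", buf, 0)
    ipv4_addr = socket.inet_ntoa(buf[20:24])  # skips the 2 pad bytes at 18..19
    return {"xid": xid, "vlan_vid": vlan_vid, "ipv4_addr": ipv4_addr}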
diff --git a/openflow_input/bsn_arp_offload b/openflow_input/bsn_arp_offload
new file mode 100644
index 0000000..5a229b1
--- /dev/null
+++ b/openflow_input/bsn_arp_offload
@@ -0,0 +1,40 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+//
+// Also derived from the OpenFlow header files which have these copyrights:
+// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
+// Copyright (c) 2011, 2012 Open Networking Foundation
+
+#version 4
+
+struct of_instruction_bsn_arp_offload : of_instruction_bsn {
+    uint16_t type == 65535;
+    uint16_t len;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 1;
+    pad(4);
+};
diff --git a/openflow_input/bsn_aux_cxns b/openflow_input/bsn_aux_cxns
new file mode 100644
index 0000000..247f81b
--- /dev/null
+++ b/openflow_input/bsn_aux_cxns
@@ -0,0 +1,54 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+
+// Request that the switch spawn and configure auxiliary OF connections.
+//
+#version 4
+// Set the number of desired aux connections num_aux=(0-16) accompanying this main connection
+// This message is only allowed on the main connection. 
+struct of_bsn_set_aux_cxns_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 58;
+    uint32_t num_aux;
+};
+
+// Synchronous reply. Confirms that the aux_cxn_set_request has been received and that
+// the requested num_aux value is supported by the switch.
+struct of_bsn_set_aux_cxns_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 59;
+    uint32_t num_aux;
+    uint32_t status; //0 = Success, !0 = Failure
+};
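A small controller-side sketch of the request/reply contract described above (helper names illustrative):

MAX_AUX_CONNECTIONS = 16  # num_aux is documented as 0-16

def aux_cxns_request_ok(num_aux):
    # Validate before sending; the request is only legal on the main connection.
    return 0 <= num_aux <= MAX_AUX_CONNECTIONS

def aux_cxns_reply_ok(requested_num_aux, reply_num_aux, reply_status):
    # status: 0 = success, anything else = failure.
    # Assumes the switch echoes the accepted num_aux in the reply.
    return reply_status == 0 and reply_num_aux == requested_num_aux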
diff --git a/openflow_input/bsn_bw b/openflow_input/bsn_bw
new file mode 100644
index 0000000..50c9267
--- /dev/null
+++ b/openflow_input/bsn_bw
@@ -0,0 +1,90 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version any
+
+struct of_bsn_bw_enable_set_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 18;
+    uint32_t enable;        // 0 to disable the extension, 1 to enable it
+};
+
+struct of_bsn_bw_enable_set_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 23;
+    uint32_t enable;        // Resulting state, 0 disabled, 1 enabled
+    uint32_t status;        // Result code: 0 success
+};
+
+struct of_bsn_bw_enable_get_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 19;
+};
+
+struct of_bsn_bw_enable_get_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 20;
+    uint32_t enabled;       // 0 if feature is disabled; 1 if feature enabled
+};
+
+struct of_bsn_bw_clear_data_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 21;
+};
+
+struct of_bsn_bw_clear_data_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 22;
+    uint32_t status;        // Result code, 0 success
+};
diff --git a/openflow_input/bsn_controller_connections b/openflow_input/bsn_controller_connections
new file mode 100644
index 0000000..eae315e
--- /dev/null
+++ b/openflow_input/bsn_controller_connections
@@ -0,0 +1,71 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+// Retrieve a list of configured controller connections and their status.
+// When auxiliary connections are used there may be multiple connections
+// to the same controller. All connections to a given controller will
+// share the same role (so there may be multiple master connections in
+// the list).
+
+// The URIs are of the form tcp://1.2.3.4:6553
+
+#version 4
+
+enum ofp_bsn_controller_connection_state(wire_type=uint8_t) {
+    OFP_BSN_CONTROLLER_CONNECTION_STATE_DISCONNECTED = 0,
+    OFP_BSN_CONTROLLER_CONNECTION_STATE_CONNECTED = 1,
+};
+
+struct of_bsn_controller_connections_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 56;
+};
+
+struct of_bsn_controller_connection {
+    enum ofp_bsn_controller_connection_state state;
+    uint8_t auxiliary_id;
+    pad(2);
+    enum ofp_controller_role role;
+    of_desc_str_t uri;
+};
+
+struct of_bsn_controller_connections_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 57;
+    list(of_bsn_controller_connection_t) connections;
+};
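A sketch of how a consumer might check the guarantee stated above, that all connections to a given controller report the same role (operates on already-decoded entries; field names mirror the struct, and grouping by URI assumes the URI identifies the controller endpoint):

from collections import defaultdict

def connections_by_controller(connections):
    # connections: iterable of dicts with "state", "auxiliary_id", "role", "uri".
    by_uri = defaultdict(list)
    for conn in connections:
        by_uri[conn["uri"]].append(conn)
    for uri, conns in by_uri.items():
        roles = set(c["role"] for c in conns)
        assert len(roles) == 1, "mixed roles reported for %s" % uri
    return by_uri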
diff --git a/openflow_input/bsn_dhcp_offload b/openflow_input/bsn_dhcp_offload
new file mode 100644
index 0000000..b21b036
--- /dev/null
+++ b/openflow_input/bsn_dhcp_offload
@@ -0,0 +1,40 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+//
+// Also derived from the OpenFlow header files which have these copyrights:
+// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
+// Copyright (c) 2011, 2012 Open Networking Foundation
+
+#version 4
+
+struct of_instruction_bsn_dhcp_offload : of_instruction_bsn {
+    uint16_t type == 65535;
+    uint16_t len;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 2;
+    pad(4);
+};
diff --git a/openflow_input/bsn_disable_src_mac_check b/openflow_input/bsn_disable_src_mac_check
new file mode 100644
index 0000000..f75c237
--- /dev/null
+++ b/openflow_input/bsn_disable_src_mac_check
@@ -0,0 +1,40 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+//
+// Also derived from the OpenFlow header files which have these copyrights:
+// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
+// Copyright (c) 2011, 2012 Open Networking Foundation
+
+#version 4
+
+struct of_instruction_bsn_disable_src_mac_check : of_instruction_bsn {
+    uint16_t type == 65535;
+    uint16_t len;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 0;
+    pad(4);
+};
diff --git a/openflow_input/bsn_flow_checksum b/openflow_input/bsn_flow_checksum
new file mode 100644
index 0000000..f8d9b51
--- /dev/null
+++ b/openflow_input/bsn_flow_checksum
@@ -0,0 +1,116 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 4
+
+// Retrieves the checksum for every bucket in a table. The entries are ordered
+// by bucket index.
+//
+// The checksum of a bucket is the XOR of the cookies of all entries in the
+// bucket. Flows are bucketed based on a prefix of the cookie.
+struct of_bsn_flow_checksum_bucket_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 10;
+    uint8_t table_id;
+};
+
+struct of_bsn_flow_checksum_bucket_stats_entry {
+    uint64_t checksum;
+};
+
+struct of_bsn_flow_checksum_bucket_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 10;
+    list(of_bsn_flow_checksum_bucket_stats_entry_t) entries;
+};
+
+
+// Retrieves the checksum for every table.
+//
+// The checksum of a table is the XOR of the cookies of all entries in the
+// table.
+struct of_bsn_table_checksum_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 11;
+};
+
+struct of_bsn_table_checksum_stats_entry {
+    uint8_t table_id;
+    uint64_t checksum;
+};
+
+struct of_bsn_table_checksum_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 11;
+    list(of_bsn_table_checksum_stats_entry_t) entries;
+};
+
+
+// This message sets the size of the buckets array. The switch may reject this
+// message if the table has entries. buckets_size must be a power of 2.
+struct of_bsn_table_set_buckets_size : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 61;
+    uint16_t table_id;
+    pad(2);
+    uint32_t buckets_size;
+};
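A sketch of the checksum scheme described in the comments above: the table checksum XORs every cookie, and each bucket checksum XORs the cookies of the flows mapped to it. The exact prefix extraction is an assumption here (top log2(buckets_size) bits of the 64-bit cookie); the text only says flows are bucketed on a cookie prefix:

def table_checksum(cookies):
    # XOR of the cookies of all entries in the table.
    result = 0
    for cookie in cookies:
        result ^= cookie
    return result

def bucket_checksums(cookies, buckets_size):
    # buckets_size must be a power of 2, as required by
    # of_bsn_table_set_buckets_size above.
    assert buckets_size > 0 and buckets_size & (buckets_size - 1) == 0
    prefix_bits = buckets_size.bit_length() - 1
    checksums = [0] * buckets_size
    for cookie in cookies:
        bucket = cookie >> (64 - prefix_bits) if prefix_bits else 0
        checksums[bucket] ^= cookie
    return checksums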
diff --git a/openflow_input/bsn_flow_idle b/openflow_input/bsn_flow_idle
new file mode 100644
index 0000000..40a95d8
--- /dev/null
+++ b/openflow_input/bsn_flow_idle
@@ -0,0 +1,99 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 4
+
+/*
+ * Notification of idle flows
+ *
+ * This extension allows the controller to request to be notified periodically
+ * about idle flows. It is very similar to the flow_removed message in standard
+ * OpenFlow, but does not delete the idle flows.
+ *
+ * If the extension is enabled using of_bsn_flow_idle_enable_set_request and
+ * the OFPFF_BSN_SEND_IDLE bit is set in the flow-mod, then the idle_timeout
+ * field in the flow-mod is not used for standard flow expiration. Instead,
+ * the switch will send an of_bsn_flow_idle message every idle_timeout seconds
+ * if the flow was not used during that period.
+ */
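+
+// Illustrative example: with the extension enabled and OFPFF_BSN_SEND_IDLE set
+// on a flow-mod whose idle_timeout is 10, the switch sends an of_bsn_flow_idle
+// message for that flow after each 10 second interval in which the flow
+// matched no packets; unlike standard idle expiration, the flow stays
+// installed.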
+
+struct of_bsn_flow_idle_enable_set_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 36;
+    uint32_t enable;        // 0 to disable the extension, 1 to enable it
+};
+
+struct of_bsn_flow_idle_enable_set_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 37;
+    uint32_t enable;        // Resulting state, 0 disabled, 1 enabled
+    uint32_t status;        // Result code: 0 success
+};
+
+struct of_bsn_flow_idle_enable_get_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 38;
+};
+
+struct of_bsn_flow_idle_enable_get_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 39;
+    uint32_t enabled;       // 0 if the feature is disabled, 1 if it is enabled
+};
+
+struct of_bsn_flow_idle : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 40;
+    uint64_t cookie;
+    uint16_t priority;
+    uint8_t table_id;
+    pad(5); // align to 8 bytes
+    of_match_t match;
+};
diff --git a/openflow_input/bsn_gentable b/openflow_input/bsn_gentable
new file mode 100644
index 0000000..6500789
--- /dev/null
+++ b/openflow_input/bsn_gentable
@@ -0,0 +1,353 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 4
+
+// We have a number of switch agents that need to be configured by the
+// controller and to report stats. Some of them will have large tables (1000+
+// entries) and so need an efficient synchronization mechanism (as can be
+// accomplished using the cookie field in flowtable entries). It's a
+// significant amount of work to do this from scratch for each new table.
+// This extension (and the corresponding Indigo code) provides a framework
+// to ease implementing new tables.
+
+// We don't plan on replacing our use of the OpenFlow flow table and group
+// table with this scheme. This is intended for controlling switch
+// functionality like the ARP and LACP agents which don't map at all to
+// flow-mods.
+
+// Each switch will have a number of tables indexed by a 16-bit table ID. Each
+// table has a name, id, a set of entries, and an array of checksum buckets.
+// There is no order to the entries; stats requests will return them in an
+// arbitrary order. The controller is expected to use the table name to
+// determine the semantics of a table.
+
+// Each entry has a key, value, stats, and checksum. The key and value are TLV
+// lists given by the controller in a gentable_entry_add message. The switch must
+// return these lists in stats replies exactly as it received them. The stats
+// are a list of TLVs controlled by the switch. The stats are expected to
+// include more than simple counters (for example, last hit time or seen TCP
+// flags). The checksum is an opaque value used for table synchronization.
+
+// LOXI includes a built-in type of_checksum_128_t, which is 128 bits but
+// only requires 32-bit alignment.
+
+
+// These TLV classes are used for keys, values, and stats. Like OXM, lists of
+// TLVs are tightly packed without padding. TLV lists may include duplicates;
+// the semantics of duplicates are left to the particular table.
+//
+// If this is eventually standardized it would be good to add a "class" type
+// member as in OXM.
+struct of_bsn_tlv {
+    uint16_t type == ?;
+    uint16_t length;
+};
+
+
+// This message sets key=value in the given table. If key already exists in the
+// table then it modifies the value, preserving stats.
+//
+// If the switch cannot process the message then it should reply with an error
+// message. The contents of the table must not be changed in case of an error.
+struct of_bsn_gentable_entry_add : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 46;
+    uint16_t table_id;
+    uint16_t key_length;
+    of_checksum_128_t checksum;
+    list(of_bsn_tlv_t) key;
+    list(of_bsn_tlv_t) value;
+};
+
+
+// This message deletes the entry with the given key in the given table.
+//
+// If the switch cannot process the message then it should reply with an error
+// message. The contents of the table must not be changed in case of an error.
+// If the key does not exist in the table, no error is generated.
+struct of_bsn_gentable_entry_delete : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 47;
+    uint16_t table_id;
+    list(of_bsn_tlv_t) key;
+};
+
+
+// This message deletes a range of table entries. The checksum_mask must be a
+// prefix mask. The checksum must be zero in the bits where the checksum_mask
+// is zero.
+//
+// The switch may fail to delete some table entries. No error messages will be
+// sent, but the error_count in the reply message will be incremented.
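+//
+// Illustrative example: a checksum_mask with only the top 16 bits of the
+// 128-bit value set is a valid prefix mask; the checksum must then be zero in
+// its low 112 bits, and its top 16 bits select which 1/65536th of the
+// checksum space the operation applies to.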
+struct of_bsn_gentable_clear_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 48;
+    uint16_t table_id;
+    pad(2);
+    of_checksum_128_t checksum;
+    of_checksum_128_t checksum_mask;
+};
+
+struct of_bsn_gentable_clear_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 49;
+    uint16_t table_id;
+    pad(2);
+    uint32_t deleted_count;
+    uint32_t error_count;
+};
+
+
+// This message sets the size of the buckets array. The switch may reject this
+// message if the table has entries. buckets_size must be a power of 2.
+struct of_bsn_gentable_set_buckets_size : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 50;
+    uint16_t table_id;
+    pad(2);
+    uint32_t buckets_size;
+};
+
+
+// Retrieve the configuration state (key, value, and checksum) for each table
+// entry in a range of buckets.
+//
+// The checksum_mask must be a prefix mask. The checksum must be zero in the
+// bits where the checksum_mask is zero.
+struct of_bsn_gentable_entry_desc_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 2;
+    uint16_t table_id;
+    pad(2);
+    of_checksum_128_t checksum;
+    of_checksum_128_t checksum_mask;
+};
+
+struct of_bsn_gentable_entry_desc_stats_entry {
+    uint16_t length;
+    uint16_t key_length;
+    of_checksum_128_t checksum;
+    list(of_bsn_tlv_t) key;
+    list(of_bsn_tlv_t) value;
+};
+
+struct of_bsn_gentable_entry_desc_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 2;
+    list(of_bsn_gentable_entry_desc_stats_entry_t) entries;
+};
+
+
+// Retrieve the runtime state (key and stats) for each table entry in a range
+// of buckets.
+//
+// The checksum_mask must be a prefix mask. The checksum must be zero in the
+// bits where the checksum_mask is zero.
+struct of_bsn_gentable_entry_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 3;
+    uint16_t table_id;
+    pad(2);
+    of_checksum_128_t checksum;
+    of_checksum_128_t checksum_mask;
+};
+
+struct of_bsn_gentable_entry_stats_entry {
+    uint16_t length;
+    uint16_t key_length;
+    list(of_bsn_tlv_t) key;
+    list(of_bsn_tlv_t) stats;
+};
+
+struct of_bsn_gentable_entry_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 3;
+    list(of_bsn_gentable_entry_stats_entry_t) entries;
+};
+
+
+// Retrieve the description for all tables.
+struct of_bsn_gentable_desc_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 4;
+};
+
+struct of_bsn_gentable_desc_stats_entry {
+    uint16_t length;
+    uint16_t table_id;
+    of_table_name_t name;
+    uint32_t buckets_size;
+    uint32_t max_entries;
+    pad(4);
+    /* TODO properties */
+};
+
+struct of_bsn_gentable_desc_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 4;
+    list(of_bsn_gentable_desc_stats_entry_t) entries;
+};
+
+
+// Retrieves stats for every table. This includes the total checksum, so the
+// controller can quickly check whether the whole table is in sync.
+//
+// The checksum of a table is the XOR of the checksums of all entries in the
+// table.
+struct of_bsn_gentable_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 7;
+};
+
+struct of_bsn_gentable_stats_entry {
+    uint16_t table_id;
+    pad(2);
+    uint32_t entry_count;
+    of_checksum_128_t checksum;
+};
+
+struct of_bsn_gentable_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 7;
+    list(of_bsn_gentable_stats_entry_t) entries;
+};
+
+
+// Retrieves the checksum for every bucket in a table. The entries are ordered
+// by bucket index.
+//
+// The checksum of a bucket is the XOR of the checksums of all entries in the
+// bucket.
+struct of_bsn_gentable_bucket_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 5;
+    uint16_t table_id;
+};
+
+struct of_bsn_gentable_bucket_stats_entry {
+    of_checksum_128_t checksum;
+};
+
+struct of_bsn_gentable_bucket_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 5;
+    list(of_bsn_gentable_bucket_stats_entry_t) entries;
+};
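+
+// Illustrative synchronization sketch (controller-side pseudocode, not part of
+// the protocol):
+//   if gentable_stats(table).checksum != xor of expected entry checksums:
+//       for each bucket in gentable_bucket_stats(table):
+//           if bucket.checksum != the expected checksum for that bucket:
+//               re-read just that bucket's entries with
+//               gentable_entry_desc_stats using a prefix checksum_mask
+// Only mismatching buckets need to be walked, which is what makes the bucket
+// array useful for large (1000+ entry) tables.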
diff --git a/openflow_input/bsn_get_interfaces b/openflow_input/bsn_get_interfaces
index 90060ee..6f316b0 100644
--- a/openflow_input/bsn_get_interfaces
+++ b/openflow_input/bsn_get_interfaces
@@ -27,29 +27,29 @@
 
 #version any
 
-struct ofp_bsn_get_interfaces_request {
+struct of_bsn_get_interfaces_request : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN
-    uint32_t subtype;   // BSN_GET_INTERFACES_REQUEST
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 9;
 };
 
-struct ofp_bsn_interface {
+struct of_bsn_interface {
     of_mac_addr_t hw_addr;
-    uint16_t pad;
+    pad(2);
     of_port_name_t name;
-    uint32_t ipv4_addr;
-    uint32_t ipv4_netmask;
+    of_ipv4_t ipv4_addr;
+    of_ipv4_t ipv4_netmask;
 };
 
-struct ofp_bsn_get_interfaces_reply {
+struct of_bsn_get_interfaces_reply : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN
-    uint32_t subtype;   // BSN_GET_INTERFACES_REQUEST
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 10;
     list(of_bsn_interface_t) interfaces;
 };
diff --git a/openflow_input/bsn_global_vrf_allowed b/openflow_input/bsn_global_vrf_allowed
new file mode 100644
index 0000000..ee26308
--- /dev/null
+++ b/openflow_input/bsn_global_vrf_allowed
@@ -0,0 +1,47 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 3
+#version 4
+
+/*
+ * Global VRF allowed flag for SwitchLight
+ */
+
+struct of_oxm_bsn_global_vrf_allowed : of_oxm {
+    uint32_t type_len == 0x00030601;
+    uint8_t value;
+};
+
+struct of_oxm_bsn_global_vrf_allowed_masked : of_oxm {
+    uint32_t type_len == 0x00030702;
+    uint8_t value;
+    uint8_t value_mask;
+};
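+
+// Informational note: type_len follows the usual OXM header layout of a 16-bit
+// class, 7-bit field, 1-bit hasmask, and 8-bit payload length. For example,
+// 0x00030601 decodes as class 0x0003, field 3, hasmask 0, length 1; the masked
+// variant 0x00030702 is the same field with hasmask 1 and length 2 (value plus
+// mask).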
diff --git a/openflow_input/bsn_hybrid b/openflow_input/bsn_hybrid
new file mode 100644
index 0000000..1cab739
--- /dev/null
+++ b/openflow_input/bsn_hybrid
@@ -0,0 +1,51 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+
+#version 1
+
+// BSN hybrid mode status messages
+struct of_bsn_hybrid_get_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 27;
+};
+
+struct of_bsn_hybrid_get_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 28;
+    uint8_t hybrid_enable;    // 1 == enabled, 0 == disabled
+    pad(1);
+    uint16_t hybrid_version;
+    pad(4);
+};
diff --git a/openflow_input/bsn_in_ports b/openflow_input/bsn_in_ports
new file mode 100644
index 0000000..b23df63
--- /dev/null
+++ b/openflow_input/bsn_in_ports
@@ -0,0 +1,61 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 3
+#version 4
+
+/*
+ * Bitmap of input ports
+ *
+ * The representation is not straightforward, but it works with existing OXM
+ * semantics.
+ *
+ * The value should always be zero. The mask should be unset in every bit position
+ * where the corresponding input port is allowed, and set in all other bits.
+ * As a special case, the highest bit in the mask is reserved for higher port
+ * numbers than can be represented in the bitmap.
+ *
+ * Within the 128-bit value and value_mask, the higher-order bits are stored
+ * first, followed by the lower-order bits.
+ *
+ * Pseudocode for populating value or mask:
+ *   bitmap |= in_port < 128 ? (1 << in_port) : (1 << 127)
+ */
+
+struct of_oxm_bsn_in_ports_128 : of_oxm {
+    uint32_t type_len == 0x00030010;
+    of_bitmap_128_t value;
+};
+
+struct of_oxm_bsn_in_ports_128_masked : of_oxm {
+    uint32_t type_len == 0x00030120;
+    of_bitmap_128_t value;
+    of_bitmap_128_t value_mask;
+};
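+
+// Illustrative example: to admit only ports 1 and 3, value is all zeroes and
+// value_mask has bits 1 and 3 cleared and every other bit (including bit 127,
+// the overflow bit) set. A packet arriving on port 2 then fails the match,
+// because bit 2 survives the mask while the value is zero.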
diff --git a/openflow_input/bsn_ip_mask b/openflow_input/bsn_ip_mask
index 03f233b..615ed40 100644
--- a/openflow_input/bsn_ip_mask
+++ b/openflow_input/bsn_ip_mask
@@ -27,37 +27,37 @@
 
 #version 1
 
-struct ofp_bsn_set_ip_mask {
+struct of_bsn_set_ip_mask : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;  // bsn 0x005c16c7,
-    uint32_t subtype;       // 0
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 0;
     uint8_t index;
-    uint8_t[3] pad;
+    pad(3);
     uint32_t mask;
 };
 
-struct ofp_bsn_get_ip_mask_request {
+struct of_bsn_get_ip_mask_request : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;  // bsn 0x005c16c7,
-    uint32_t subtype;       // 1
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 1;
     uint8_t index;
-    uint8_t[7] pad;
+    pad(7);
 };
 
-struct ofp_bsn_get_ip_mask_reply {
+struct of_bsn_get_ip_mask_reply : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;  // bsn 0x005c16c7,
-    uint32_t subtype;       // 2
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 2;
     uint8_t index;
-    uint8_t[3] pad;
+    pad(3);
     uint32_t mask;
 };
diff --git a/openflow_input/bsn_l2_table b/openflow_input/bsn_l2_table
new file mode 100644
index 0000000..725c805
--- /dev/null
+++ b/openflow_input/bsn_l2_table
@@ -0,0 +1,77 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+
+#version 1
+
+// BSN L2 table configuration messages
+struct of_bsn_set_l2_table_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 12;
+    uint8_t l2_table_enable;    // 1 == enabled, 0 == disabled
+    pad(1);
+    uint16_t l2_table_priority;  // priority of all flows in L2 table
+    pad(4);
+};
+
+struct of_bsn_set_l2_table_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 24;
+    uint8_t l2_table_enable;    // Resulting state: 1 == enabled, 0 == disabled
+    pad(1);
+    uint16_t l2_table_priority;  // priority used, must match request if ok
+    uint32_t status; // 0 means success
+};
+
+struct of_bsn_get_l2_table_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 13;
+};
+
+struct of_bsn_get_l2_table_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 14;
+    uint8_t l2_table_enable;    // 1 == enabled, 0 == disabled
+    pad(1);
+    uint16_t l2_table_priority;  // priority of all flows in L2 table
+    pad(4);
+};
diff --git a/openflow_input/bsn_l3_dst_class_id b/openflow_input/bsn_l3_dst_class_id
new file mode 100644
index 0000000..8d324a1
--- /dev/null
+++ b/openflow_input/bsn_l3_dst_class_id
@@ -0,0 +1,47 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 3
+#version 4
+
+/*
+ * L3 destination class ID for SwitchLight
+ */
+
+struct of_oxm_bsn_l3_dst_class_id : of_oxm {
+    uint32_t type_len == 0x00030c04;
+    uint32_t value;
+};
+
+struct of_oxm_bsn_l3_dst_class_id_masked : of_oxm {
+    uint32_t type_len == 0x00030d08;
+    uint32_t value;
+    uint32_t value_mask;
+};
diff --git a/openflow_input/bsn_l3_interface_class_id b/openflow_input/bsn_l3_interface_class_id
new file mode 100644
index 0000000..80d1e37
--- /dev/null
+++ b/openflow_input/bsn_l3_interface_class_id
@@ -0,0 +1,47 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 3
+#version 4
+
+/*
+ * L3 interface class ID for SwitchLight
+ */
+
+struct of_oxm_bsn_l3_interface_class_id : of_oxm {
+    uint32_t type_len == 0x00030804;
+    uint32_t value;
+};
+
+struct of_oxm_bsn_l3_interface_class_id_masked : of_oxm {
+    uint32_t type_len == 0x00030908;
+    uint32_t value;
+    uint32_t value_mask;
+};
diff --git a/openflow_input/bsn_l3_src_class_id b/openflow_input/bsn_l3_src_class_id
new file mode 100644
index 0000000..58f60c6
--- /dev/null
+++ b/openflow_input/bsn_l3_src_class_id
@@ -0,0 +1,47 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 3
+#version 4
+
+/*
+ * L3 source class ID for SwitchLight
+ */
+
+struct of_oxm_bsn_l3_src_class_id : of_oxm {
+    uint32_t type_len == 0x00030a04;
+    uint32_t value;
+};
+
+struct of_oxm_bsn_l3_src_class_id_masked : of_oxm {
+    uint32_t type_len == 0x00030b08;
+    uint32_t value;
+    uint32_t value_mask;
+};
diff --git a/openflow_input/bsn_lacp b/openflow_input/bsn_lacp
new file mode 100644
index 0000000..d2e540a
--- /dev/null
+++ b/openflow_input/bsn_lacp
@@ -0,0 +1,132 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 4
+
+// LACP Convergence Status set in of_bsn_lacp_convergence_notif message
+enum of_bsn_lacp_convergence_status_t(wire_type=uint8_t, complete=False) {
+    LACP_SUCCESS = 0,
+    LACP_TIMEDOUT = 1,
+    LACP_OUT_OF_SYNC = 2,
+};
+
+struct of_bsn_set_lacp_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 41;
+    uint8_t enabled;
+    pad(3);
+    of_port_no_t port_no;
+    uint16_t actor_sys_priority;
+    of_mac_addr_t actor_sys_mac;
+    uint16_t actor_port_priority;
+    uint16_t actor_port_num;
+    uint16_t actor_key;
+};
+
+struct of_bsn_set_lacp_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 42;
+    uint32_t status;
+    of_port_no_t port_no;
+};
+
+struct of_bsn_lacp_convergence_notif : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 43;
+    uint8_t convergence_status;
+    pad(3);
+    of_port_no_t port_no;
+
+    uint16_t actor_sys_priority;
+    of_mac_addr_t actor_sys_mac;
+    uint16_t actor_port_priority;
+    uint16_t actor_port_num;
+    uint16_t actor_key;
+
+    uint16_t partner_sys_priority;
+    of_mac_addr_t partner_sys_mac;
+    uint16_t partner_port_priority;
+    uint16_t partner_port_num;
+    uint16_t partner_key;
+};
+
+struct of_bsn_lacp_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 1;
+};
+
+struct of_bsn_lacp_stats_entry {
+    of_port_no_t port_no;
+    uint16_t actor_sys_priority;
+    of_mac_addr_t actor_sys_mac;
+    uint16_t actor_port_priority;
+    uint16_t actor_port_num;
+    uint16_t actor_key;
+    uint8_t convergence_status;
+    pad(1);
+    uint16_t partner_sys_priority;
+    of_mac_addr_t partner_sys_mac;
+    uint16_t partner_port_priority;
+    uint16_t partner_port_num;
+    uint16_t partner_key;
+    pad(2);
+};
+
+struct of_bsn_lacp_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 1;
+    list(of_bsn_lacp_stats_entry_t) entries;
+};
diff --git a/openflow_input/bsn_lag_id b/openflow_input/bsn_lag_id
new file mode 100644
index 0000000..63645a3
--- /dev/null
+++ b/openflow_input/bsn_lag_id
@@ -0,0 +1,47 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 3
+#version 4
+
+/*
+ * Ingress LAG ID for SwitchLight
+ */
+
+struct of_oxm_bsn_lag_id : of_oxm {
+    uint32_t type_len == 0x00030204;
+    uint32_t value;
+};
+
+struct of_oxm_bsn_lag_id_masked : of_oxm {
+    uint32_t type_len == 0x00030308;
+    uint32_t value;
+    uint32_t value_mask;
+};
diff --git a/openflow_input/bsn_mirror b/openflow_input/bsn_mirror
index c873595..e2f473f 100644
--- a/openflow_input/bsn_mirror
+++ b/openflow_input/bsn_mirror
@@ -28,47 +28,47 @@
 #version any
 
 // BSN mirror action
-struct ofp_action_bsn_mirror {
-    uint16_t type;      // OF_ACTION_TYPE_EXPERIMENTER
+struct of_action_bsn_mirror : of_action_bsn {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN 
-    uint32_t subtype;   // ACTION_BSN_MIRROR 
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 1;
     uint32_t dest_port; // mirror destination port
     uint32_t vlan_tag;  // VLAN tag for mirrored packet (TPID+TCI) (0 == none)
     uint8_t copy_stage; // 0 == ingress, 1 == egress 
-    uint8_t[3] pad;
+    pad(3);
 };
 
 // BSN mirroring messages
-struct ofp_bsn_set_mirroring {
+struct of_bsn_set_mirroring : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN
-    uint32_t subtype;   // BSN_MIRRORING_SET
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 3;
     uint8_t report_mirror_ports;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_bsn_get_mirroring_request {
+struct of_bsn_get_mirroring_request : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN
-    uint32_t subtype;   // BSN_MIRRORING_GET_REQUEST
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 4;
     uint8_t report_mirror_ports;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_bsn_get_mirroring_reply {
+struct of_bsn_get_mirroring_reply : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN
-    uint32_t subtype;   // BSN_MIRRORING_GET_REPLY
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 5;
     uint8_t report_mirror_ports;
-    uint8_t[3] pad;
+    pad(3);
 };
diff --git a/openflow_input/bsn_pdu b/openflow_input/bsn_pdu
new file mode 100644
index 0000000..66465f3
--- /dev/null
+++ b/openflow_input/bsn_pdu
@@ -0,0 +1,100 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version any
+
+// When the slot_num field has this value, the message applies
+// to all currently used slots on the switch for the given port
+enum of_bsn_pdu_slot_num_t(wire_type=uint8_t, complete=False) {
+    BSN_PDU_SLOT_NUM_ANY = 0xff
+};
+
+struct of_bsn_pdu_tx_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 31;
+    uint32_t tx_interval_ms;
+    of_port_no_t port_no;
+    uint8_t slot_num;
+    pad(3);
+    of_octets_t data;
+};
+
+struct of_bsn_pdu_tx_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 32;
+    uint32_t status; // 0 means success
+    of_port_no_t port_no;
+    uint8_t slot_num;
+};
+
+struct of_bsn_pdu_rx_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 33;
+    uint32_t timeout_ms;
+    of_port_no_t port_no;
+    uint8_t slot_num;
+    pad(3);
+    of_octets_t data;
+};
+
+struct of_bsn_pdu_rx_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 34;
+    uint32_t status; // 0 means success
+    of_port_no_t port_no;
+    uint8_t slot_num;
+};
+
+struct of_bsn_pdu_rx_timeout : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 35;
+    of_port_no_t port_no;
+    uint8_t slot_num;
+};
diff --git a/openflow_input/bsn_pktin_suppression b/openflow_input/bsn_pktin_suppression
index fcfa4aa..ab4c2fd 100644
--- a/openflow_input/bsn_pktin_suppression
+++ b/openflow_input/bsn_pktin_suppression
@@ -55,17 +55,27 @@
 // The switch should reject the message if both 'hard_timeout' and 'idle_timeout'
 // are zero, since the suppression flows would never expire.
 
-struct ofp_bsn_set_pktin_suppression {
+struct of_bsn_set_pktin_suppression_request : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN
-    uint32_t subtype;         // 11
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 11;
     uint8_t enabled;
-    uint8_t pad;
+    pad(1);
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint64_t cookie;
 };
+
+struct of_bsn_set_pktin_suppression_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 25;
+    uint32_t status; // 0 means success
+};
diff --git a/openflow_input/bsn_port_counter b/openflow_input/bsn_port_counter
new file mode 100644
index 0000000..f3d149f
--- /dev/null
+++ b/openflow_input/bsn_port_counter
@@ -0,0 +1,79 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 4
+
+enum ofp_bsn_port_counter(wire_type=uint8_t, complete=False) {
+  OFP_BSN_PORT_COUNTER_RX_BYTES = 0,
+  OFP_BSN_PORT_COUNTER_RX_PACKETS_UNICAST = 1,
+  OFP_BSN_PORT_COUNTER_RX_PACKETS_BROADCAST = 2,
+  OFP_BSN_PORT_COUNTER_RX_PACKETS_MULTICAST = 3,
+  OFP_BSN_PORT_COUNTER_RX_DROPPED = 4,
+  OFP_BSN_PORT_COUNTER_RX_ERRORS = 5,
+  OFP_BSN_PORT_COUNTER_TX_BYTES = 6,
+  OFP_BSN_PORT_COUNTER_TX_PACKETS_UNICAST = 7,
+  OFP_BSN_PORT_COUNTER_TX_PACKETS_BROADCAST = 8,
+  OFP_BSN_PORT_COUNTER_TX_PACKETS_MULTICAST = 9,
+  OFP_BSN_PORT_COUNTER_TX_DROPPED = 10,
+  OFP_BSN_PORT_COUNTER_TX_ERRORS = 11,
+};
+
+struct of_bsn_port_counter_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 8;
+    of_port_no_t port_no;
+};
+
+struct of_bsn_port_counter_stats_entry {
+    uint16_t length;
+    pad(2);
+    of_port_no_t port_no;
+    list(of_uint64_t) values;
+};
+
+struct of_bsn_port_counter_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 8;
+    list(of_bsn_port_counter_stats_entry_t) entries;
+};
diff --git a/openflow_input/bsn_role_status b/openflow_input/bsn_role_status
new file mode 100644
index 0000000..f53ff23
--- /dev/null
+++ b/openflow_input/bsn_role_status
@@ -0,0 +1,55 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+// Backport the OpenFlow 1.4 role status message
+//
+// This message is sent when a controller's role changes for any reason other
+// than that controller itself sending an OFPT_ROLE_REQUEST.
+
+#version 4
+
+enum ofp_bsn_controller_role_reason(wire_type=uint8_t) {
+    OFP_BSN_CONTROLLER_ROLE_REASON_MASTER_REQUEST = 0,
+    OFP_BSN_CONTROLLER_ROLE_REASON_CONFIG = 1,
+    OFP_BSN_CONTROLLER_ROLE_REASON_EXPERIMENTER = 2,
+};
+
+struct of_bsn_role_status : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 55;
+    enum ofp_controller_role role;
+    enum ofp_bsn_controller_role_reason reason;
+    pad(3);
+    uint64_t generation_id;
+};
diff --git a/openflow_input/bsn_set_tunnel_dst b/openflow_input/bsn_set_tunnel_dst
index 6b16d96..b9be58f 100644
--- a/openflow_input/bsn_set_tunnel_dst
+++ b/openflow_input/bsn_set_tunnel_dst
@@ -28,10 +28,10 @@
 #version any
 
 // BSN set tunnel destination IP action
-struct ofp_action_bsn_set_tunnel_dst {
-    uint16_t type;      // OF_ACTION_TYPE_EXPERIMENTER
+struct of_action_bsn_set_tunnel_dst : of_action_bsn {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN 
-    uint32_t subtype;   // ACTION_BSN_SET_TUNNEL_DST
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 2;
     uint32_t dst; // tunnel destination IP
 };
diff --git a/openflow_input/bsn_shell b/openflow_input/bsn_shell
index ea85571..d9c2dc7 100644
--- a/openflow_input/bsn_shell
+++ b/openflow_input/bsn_shell
@@ -27,33 +27,33 @@
 
 #version 1
 
-struct ofp_bsn_shell_command {
+struct of_bsn_shell_command : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN
-    uint32_t subtype;   // BSN_SHELL_COMMAND
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 6;
     uint32_t service;
     of_octets_t data;
 };
 
-struct ofp_bsn_shell_output {
+struct of_bsn_shell_output : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN
-    uint32_t subtype;   // BSN_SHELL_OUTPUT
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 7;
     of_octets_t data;
 };
 
-struct ofp_bsn_shell_status {
+struct of_bsn_shell_status : of_bsn_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_BSN
-    uint32_t subtype;   // BSN_SHELL_STATUS
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 8;
     uint32_t status;
 };
diff --git a/openflow_input/bsn_switch_pipeline b/openflow_input/bsn_switch_pipeline
new file mode 100644
index 0000000..ff2213f
--- /dev/null
+++ b/openflow_input/bsn_switch_pipeline
@@ -0,0 +1,99 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 4
+
+struct of_bsn_get_switch_pipeline_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 51;
+};
+
+struct of_bsn_get_switch_pipeline_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 52;
+    of_desc_str_t pipeline;
+};
+
+struct of_bsn_set_switch_pipeline_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 53;
+    of_desc_str_t pipeline;
+};
+
+struct of_bsn_set_switch_pipeline_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 54;
+    uint32_t status;
+};
+
+struct of_bsn_switch_pipeline_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 6;
+};
+
+struct of_bsn_switch_pipeline_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 6;
+    list(of_bsn_switch_pipeline_stats_entry_t) entries;
+};
+
+struct of_bsn_switch_pipeline_stats_entry {
+    of_desc_str_t pipeline;
+};
diff --git a/openflow_input/bsn_table_mod b/openflow_input/bsn_table_mod
index 418c716..5846c53 100644
--- a/openflow_input/bsn_table_mod
+++ b/openflow_input/bsn_table_mod
@@ -29,12 +29,12 @@
 
 // This is the 1.1+ table mod message which we back port to 1.0
 // for use inside components
-struct ofp_table_mod {
+struct of_table_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 22;
     uint16_t length;
     uint32_t xid;
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     uint32_t config;
 };
diff --git a/openflow_input/bsn_time b/openflow_input/bsn_time
new file mode 100644
index 0000000..0a955be
--- /dev/null
+++ b/openflow_input/bsn_time
@@ -0,0 +1,54 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+
+// Request a timestamp during message processing.
+//
+// The timestamp is relative to when the switch sent the initial HELLO.
+// The intended use is in conjunction with barriers to approximately determine
+// time elapsed between processing two messages (such as stats requests).
+// The timestamp must be monotonic (not affected by system time updates).
+
+#version 4
+
+struct of_bsn_time_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 44;
+};
+
+struct of_bsn_time_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 45;
+    uint64_t time_ms; /* Milliseconds since HELLO */
+};
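+
+// Illustrative usage: send of_bsn_time_request, a barrier, the stats request
+// of interest, another barrier, and a second of_bsn_time_request; the
+// difference between the two time_ms values approximates how long the switch
+// spent processing the stats request.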
diff --git a/openflow_input/bsn_tlv b/openflow_input/bsn_tlv
new file mode 100644
index 0000000..6105687
--- /dev/null
+++ b/openflow_input/bsn_tlv
@@ -0,0 +1,120 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 4
+
+struct of_bsn_tlv_port : of_bsn_tlv {
+    uint16_t type == 0;
+    uint16_t length;
+    of_port_no_t value;
+};
+
+struct of_bsn_tlv_mac : of_bsn_tlv {
+    uint16_t type == 1;
+    uint16_t length;
+    of_mac_addr_t value;
+};
+
+struct of_bsn_tlv_rx_packets : of_bsn_tlv {
+    uint16_t type == 2;
+    uint16_t length;
+    uint64_t value;
+};
+
+struct of_bsn_tlv_tx_packets : of_bsn_tlv {
+    uint16_t type == 3;
+    uint16_t length;
+    uint64_t value;
+};
+
+struct of_bsn_tlv_ipv4 : of_bsn_tlv {
+    uint16_t type == 4;
+    uint16_t length;
+    of_ipv4_t value;
+};
+
+struct of_bsn_tlv_idle_time : of_bsn_tlv {
+    uint16_t type == 5;
+    uint16_t length;
+    uint64_t value; /* Milliseconds */
+};
+
+struct of_bsn_tlv_vlan_vid : of_bsn_tlv {
+    uint16_t type == 6;
+    uint16_t length;
+    uint16_t value;
+};
+
+struct of_bsn_tlv_idle_notification : of_bsn_tlv {
+    uint16_t type == 7;
+    uint16_t length;
+};
+
+struct of_bsn_tlv_idle_timeout : of_bsn_tlv {
+    uint16_t type == 8;
+    uint16_t length;
+    uint32_t value; /* Milliseconds */
+};
+
+struct of_bsn_tlv_unicast_query_timeout : of_bsn_tlv {
+    uint16_t type == 9;
+    uint16_t length;
+    uint32_t value; /* Milliseconds */
+};
+
+struct of_bsn_tlv_broadcast_query_timeout : of_bsn_tlv {
+    uint16_t type == 10;
+    uint16_t length;
+    uint32_t value; /* Milliseconds */
+};
+
+struct of_bsn_tlv_request_packets : of_bsn_tlv {
+    uint16_t type == 11;
+    uint16_t length;
+    uint64_t value;
+};
+
+struct of_bsn_tlv_reply_packets : of_bsn_tlv {
+    uint16_t type == 12;
+    uint16_t length;
+    uint64_t value;
+};
+
+struct of_bsn_tlv_miss_packets : of_bsn_tlv {
+    uint16_t type == 13;
+    uint16_t length;
+    uint64_t value;
+};
+
+struct of_bsn_tlv_circuit_id : of_bsn_tlv {
+    uint16_t type == 14;
+    uint16_t length;
+    of_octets_t value;
+};
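
All of the of_bsn_tlv_* structures above share a four-byte header of uint16 type and uint16 length. The Python sketch below walks a packed TLV list; it assumes, as is conventional for these TLVs, that length counts the whole TLV including its header, that values are big-endian, and that of_port_no_t is 32 bits under #version 4. The decoder table is an illustrative subset, not the full set of types.

    import struct

    def _u64(b): return struct.unpack("!Q", b)[0]
    def _u16(b): return struct.unpack("!H", b)[0]

    # Illustrative subset of the "type ==" constants defined above.
    BSN_TLV_DECODERS = {
        0: ("port", lambda b: struct.unpack("!I", b)[0]),  # of_port_no_t is 32 bits in OF 1.3
        1: ("mac", lambda b: b.hex(":")),
        2: ("rx_packets", _u64),
        3: ("tx_packets", _u64),
        5: ("idle_time_ms", _u64),
        6: ("vlan_vid", _u16),
    }

    def walk_bsn_tlvs(buf):
        off = 0
        while off + 4 <= len(buf):
            tlv_type, tlv_len = struct.unpack_from("!HH", buf, off)
            if tlv_len < 4:                   # malformed length; avoid looping forever
                break
            name, decode = BSN_TLV_DECODERS.get(tlv_type, (tlv_type, bytes))
            yield name, decode(buf[off + 4:off + tlv_len])
            off += tlv_len
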
diff --git a/openflow_input/bsn_vlan_counter b/openflow_input/bsn_vlan_counter
new file mode 100644
index 0000000..51dd356
--- /dev/null
+++ b/openflow_input/bsn_vlan_counter
@@ -0,0 +1,71 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 4
+
+enum of_bsn_vlan_counter_t(wire_type=uint8_t, complete=False) {
+  OFP_BSN_VLAN_COUNTER_RX_BYTES = 0,
+  OFP_BSN_VLAN_COUNTER_RX_PACKETS = 1,
+  OFP_BSN_VLAN_COUNTER_TX_BYTES = 2,
+  OFP_BSN_VLAN_COUNTER_TX_PACKETS = 3,
+};
+
+struct of_bsn_vlan_counter_stats_request : of_bsn_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 9;
+    uint16_t vlan_vid;
+};
+
+struct of_bsn_vlan_counter_stats_entry {
+    uint16_t length;
+    uint16_t vlan_vid;
+    pad(4);
+    list(of_uint64_t) values;
+};
+
+struct of_bsn_vlan_counter_stats_reply : of_bsn_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 9;
+    list(of_bsn_vlan_counter_stats_entry_t) entries;
+};
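
Each of_bsn_vlan_counter_stats_entry pairs a VLAN ID with a list of uint64 counters whose positions are given by of_bsn_vlan_counter_t above (the enum is marked complete=False, so later counters may follow). A Python sketch decoding one entry, assuming the entry's length field covers the whole entry and big-endian encoding:

    import struct

    COUNTER_NAMES = ["rx_bytes", "rx_packets", "tx_bytes", "tx_packets"]  # indices 0..3 per the enum

    def parse_vlan_counter_entry(buf, off=0):
        length, vlan_vid = struct.unpack_from("!HH", buf, off)
        n_values = (length - 8) // 8          # 8 bytes of header + pad, then uint64 values
        values = struct.unpack_from("!%dQ" % n_values, buf, off + 8)
        # zip() drops counters beyond the ones named above (the enum is complete=False)
        return vlan_vid, dict(zip(COUNTER_NAMES, values)), off + length
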
diff --git a/openflow_input/bsn_vport b/openflow_input/bsn_vport
new file mode 100644
index 0000000..4eeda8c
--- /dev/null
+++ b/openflow_input/bsn_vport
@@ -0,0 +1,113 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version any
+
+enum ofp_bsn_vport_status {
+    OF_BSN_VPORT_STATUS_OK = 0,
+    OF_BSN_VPORT_STATUS_FAILED = 1,
+};
+
+// When the ingress or egress VID has this value, no outer tag should be used.
+// In this case, the corresponding TPID is ignored.
+
+enum ofp_bsn_vport_q_in_q_untagged(wire_type=uint16_t, complete=False) {
+    OF_BSN_VPORT_Q_IN_Q_UNTAGGED = 0xffff,
+};
+
+// BSN Virtual port object header
+// FIXME For now, inheritance is not exercised.  See below.
+struct of_bsn_vport {
+    uint16_t type == ?;  /* Discriminate virtual port type */
+    uint16_t length; /* Length in bytes of this structure, including this header */
+    /* Remainder of data is specific to the port type */
+};
+
+
+// Q-in-Q virtual port specification
+
+struct of_bsn_vport_q_in_q : of_bsn_vport {
+    uint16_t type == 0;
+    uint16_t length;  /* 32 */
+    uint32_t port_no;     /* OF port number of parent; usually phys port */
+    uint16_t ingress_tpid;
+    uint16_t ingress_vlan_id;
+    uint16_t egress_tpid;
+    uint16_t egress_vlan_id;
+    of_port_name_t if_name;  /* Name to use in create operation */
+};
+
+// Request from controller to switch to create vport
+struct of_bsn_virtual_port_create_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 15;
+    // FIXME This should be an instance of the inheritance superclass
+    of_bsn_vport_q_in_q_t vport;   // Description of vport to create
+    // Additional data follows depending on header type
+};
+
+// Reply from switch to controller indicating port number created
+// vport_no must be 16 bits to be compatible with 1.0
+struct of_bsn_virtual_port_create_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;           // Must match create_request
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 16;
+    uint32_t status;        // 0 means success
+    uint32_t vport_no;      // The OF port number created.  16-bits for OF 1.0
+};
+
+// Request from controller to switch to remove a vport
+struct of_bsn_virtual_port_remove_request : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 17;
+    uint32_t vport_no;      // The OF port number to be removed
+};
+
+// Reply from switch to controller reporting the result of a vport removal
+struct of_bsn_virtual_port_remove_reply : of_bsn_header {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;           // Must match request xid
+    uint32_t experimenter == 0x5c16c7;
+    uint32_t subtype == 26;
+    uint32_t status;        // 0 means success
+};
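
The create request embeds a 32-byte of_bsn_vport_q_in_q body. The Python sketch below packs that body, assuming of_port_name_t is a 16-byte NUL-padded string (OFP_MAX_PORT_NAME_LEN = 16), big-endian encoding, and a default TPID of 0x8100 (the standard 802.1Q value, chosen here for illustration); the untagged sentinel is OF_BSN_VPORT_Q_IN_Q_UNTAGGED from the enum above. The computed size coming out at 32 matches the documented length.

    import struct

    Q_IN_Q = struct.Struct("!HHIHHHH16s")   # type, length, port_no, ingress/egress tpid+vid, if_name
    UNTAGGED = 0xffff                        # OF_BSN_VPORT_Q_IN_Q_UNTAGGED

    def pack_q_in_q_vport(port_no, ingress_vid, if_name,
                          ingress_tpid=0x8100, egress_tpid=0x8100, egress_vid=UNTAGGED):
        assert Q_IN_Q.size == 32             # matches the documented length above
        return Q_IN_Q.pack(0, Q_IN_Q.size, port_no,
                           ingress_tpid, ingress_vid, egress_tpid, egress_vid,
                           if_name.encode("ascii"))
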
diff --git a/openflow_input/bsn_vrf b/openflow_input/bsn_vrf
new file mode 100644
index 0000000..26959bd
--- /dev/null
+++ b/openflow_input/bsn_vrf
@@ -0,0 +1,47 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License,
+// version 1.0 (EPL), with the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may
+// distribute libraries generated by LoxiGen (LoxiGen Libraries)
+// under the terms of your choice, provided that copyright and
+// licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i)
+// included in the LoxiGen Libraries, if distributed in source code
+// form and (ii) included in any documentation for the LoxiGen
+// Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc.
+// This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or
+// LOXI Exception. You may obtain a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an "AS
+// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the EPL for the specific language
+// governing permissions and limitations under the EPL.
+
+#version 3
+#version 4
+
+/*
+ * Virtual Routing/Forwarding ID for SwitchLight
+ */
+
+struct of_oxm_bsn_vrf : of_oxm {
+    uint32_t type_len == 0x00030404;
+    uint32_t value;
+};
+
+struct of_oxm_bsn_vrf_masked : of_oxm {
+    uint32_t type_len == 0x00030508;
+    uint32_t value;
+    uint32_t value_mask;
+};
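
The type_len constants above pack the whole OXM header into one 32-bit value: oxm_class in the top 16 bits, oxm_field in the next 7 bits, a has_mask flag bit, and the payload length in the low byte (the layout defined by the OpenFlow 1.2+ spec). A short Python sketch decoding the two BSN VRF constants:

    def decode_oxm_type_len(type_len):
        return {
            "oxm_class": type_len >> 16,
            "oxm_field": (type_len >> 9) & 0x7f,
            "has_mask": bool(type_len & 0x100),
            "payload_len": type_len & 0xff,
        }

    # 0x00030404 -> class 0x0003, field 2, no mask, 4-byte value
    # 0x00030508 -> class 0x0003, field 2, masked, 8 bytes (value + mask)
    assert decode_oxm_type_len(0x00030404) == {"oxm_class": 3, "oxm_field": 2,
                                               "has_mask": False, "payload_len": 4}
    assert decode_oxm_type_len(0x00030508) == {"oxm_class": 3, "oxm_field": 2,
                                               "has_mask": True, "payload_len": 8}
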
diff --git a/openflow_input/nicira b/openflow_input/nicira
new file mode 100644
index 0000000..ba2048d
--- /dev/null
+++ b/openflow_input/nicira
@@ -0,0 +1,49 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+
+#version any
+
+// Nicira extension message
+struct of_nicira_header : of_experimenter {
+    uint8_t version;
+    uint8_t type == 4;
+    uint16_t length;
+    uint32_t xid;
+    uint32_t experimenter == 0x2320;
+    uint32_t subtype == ?;
+};
+
+// Nicira extension action
+struct of_action_nicira: of_action_experimenter {
+    uint16_t type == 65535;
+    uint16_t len;
+    uint32_t experimenter == 0x2320;
+    uint16_t subtype == ?;
+    pad(2);
+    pad(4);
+};
+
diff --git a/openflow_input/nicira_dec_ttl b/openflow_input/nicira_dec_ttl
index b507d57..3b95366 100644
--- a/openflow_input/nicira_dec_ttl
+++ b/openflow_input/nicira_dec_ttl
@@ -27,11 +27,11 @@
 
 #version any
 
-struct ofp_action_nicira_dec_ttl {
-    uint16_t type;      // OF_ACTION_TYPE_EXPERIMENTER
+struct of_action_nicira_dec_ttl : of_action_nicira {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;    // OF_EXPERIMENTER_ID_NICIRA
-    uint16_t subtype;         // 18
-    uint16_t pad;
-    uint32_t pad2;
+    uint32_t experimenter == 0x2320;
+    uint16_t subtype == 18;
+    pad(2);
+    pad(4);
 };
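
The rewritten action above is a 16-byte experimenter action whose len field covers the whole action. A Python sketch of its serialization, assuming big-endian encoding and zero-filled padding:

    import struct

    DEC_TTL = struct.Struct("!HHIH2x4x")   # type, len, experimenter, subtype, pad(2), pad(4)

    def nicira_dec_ttl_action():
        return DEC_TTL.pack(0xffff, DEC_TTL.size, 0x2320, 18)

    # len(nicira_dec_ttl_action()) == 16, a multiple of 8 as OpenFlow requires for actions
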
diff --git a/openflow_input/nicira_role b/openflow_input/nicira_role
index caa2c1a..0df8742 100644
--- a/openflow_input/nicira_role
+++ b/openflow_input/nicira_role
@@ -27,28 +27,28 @@
 
 #version 1
 
-enum ofp_nicira_controller_role {
+enum ofp_nicira_controller_role(wire_type=uint32_t) {
    NX_ROLE_OTHER = 0,
    NX_ROLE_MASTER = 1,
    NX_ROLE_SLAVE = 2,
 };
 
-struct ofp_nicira_controller_role_request {
+struct of_nicira_controller_role_request : of_nicira_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter; // OF_EXPERIMENTER_ID_NICIRA 0x00002320
-    uint32_t subtype;      // 10
-    uint32_t role;         // 0 other, 1 master, 2 slave
+    uint32_t experimenter == 0x2320;
+    uint32_t subtype == 10;
+    enum ofp_nicira_controller_role role;         // 0 other, 1 master, 2 slave
 };
 
-struct ofp_nicira_controller_role_reply {
+struct of_nicira_controller_role_reply : of_nicira_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter; // OF_EXPERIMENTER_ID_NICIRA 0x00002320
-    uint32_t subtype;      // 11
-    uint32_t role;         // 0 other, 1 master, 2 slave
+    uint32_t experimenter == 0x2320;
+    uint32_t subtype == 11;
+    enum ofp_nicira_controller_role role;         // 0 other, 1 master, 2 slave
 };
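
The role messages are ordinary Nicira experimenter messages with a single 32-bit role value at the end, now typed by the enum above. A Python sketch packing a request and decoding a reply, assuming big-endian encoding and the OF 1.0 version byte (1) implied by #version 1:

    import struct

    ROLE_MSG = struct.Struct("!BBHIIII")   # version, type, length, xid, experimenter, subtype, role
    ROLE_NAMES = {0: "NX_ROLE_OTHER", 1: "NX_ROLE_MASTER", 2: "NX_ROLE_SLAVE"}

    def nicira_role_request(xid, role):
        return ROLE_MSG.pack(1, 4, ROLE_MSG.size, xid, 0x2320, 10, role)

    def parse_role_reply(msg):
        *_, experimenter, subtype, role = ROLE_MSG.unpack(msg)
        assert (experimenter, subtype) == (0x2320, 11)
        return ROLE_NAMES.get(role, role)
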
diff --git a/openflow_input/oxm-1.2 b/openflow_input/oxm-1.2
new file mode 100644
index 0000000..5006cff
--- /dev/null
+++ b/openflow_input/oxm-1.2
@@ -0,0 +1,433 @@
+// Copyright 2013, Big Switch Networks, Inc.
+//
+// LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+// the following special exception:
+//
+// LOXI Exception
+//
+// As a special exception to the terms of the EPL, you may distribute libraries
+// generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+// that copyright and licensing notices generated by LoxiGen are not altered or removed
+// from the LoxiGen Libraries and the notice provided below is (i) included in
+// the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+// documentation for the LoxiGen Libraries, if distributed in binary form.
+//
+// Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+//
+// You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+// a copy of the EPL at:
+//
+// http://www.eclipse.org/legal/epl-v10.html
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// EPL for the specific language governing permissions and limitations
+// under the EPL.
+//
+// Also derived from the OpenFlow header files which have these copyrights:
+// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
+// Copyright (c) 2011, 2012 Open Networking Foundation
+
+#version 3
+#version 4
+
+struct of_oxm {
+    uint32_t type_len == ?;
+};
+
+struct of_oxm_arp_op : of_oxm {
+    uint32_t type_len == 0x80002a02;
+    uint16_t value;
+};
+
+struct of_oxm_arp_op_masked : of_oxm {
+    uint32_t type_len == 0x80002b04;
+    uint16_t value;
+    uint16_t value_mask;
+};
+
+struct of_oxm_arp_sha : of_oxm {
+    uint32_t type_len == 0x80003006;
+    of_mac_addr_t value;
+};
+
+struct of_oxm_arp_sha_masked : of_oxm {
+    uint32_t type_len == 0x8000310c;
+    of_mac_addr_t value;
+    of_mac_addr_t value_mask;
+};
+
+struct of_oxm_arp_spa : of_oxm {
+    uint32_t type_len == 0x80002c04;
+    uint32_t value;
+};
+
+struct of_oxm_arp_spa_masked : of_oxm {
+    uint32_t type_len == 0x80002d08;
+    uint32_t value;
+    uint32_t value_mask;
+};
+
+struct of_oxm_arp_tha : of_oxm {
+    uint32_t type_len == 0x80003206;
+    of_mac_addr_t value;
+};
+
+struct of_oxm_arp_tha_masked : of_oxm {
+    uint32_t type_len == 0x8000330c;
+    of_mac_addr_t value;
+    of_mac_addr_t value_mask;
+};
+
+struct of_oxm_arp_tpa : of_oxm {
+    uint32_t type_len == 0x80002e04;
+    uint32_t value;
+};
+
+struct of_oxm_arp_tpa_masked : of_oxm {
+    uint32_t type_len == 0x80002f08;
+    uint32_t value;
+    uint32_t value_mask;
+};
+
+struct of_oxm_eth_dst : of_oxm {
+    uint32_t type_len == 0x80000606;
+    of_mac_addr_t value;
+};
+
+struct of_oxm_eth_dst_masked : of_oxm {
+    uint32_t type_len == 0x8000070c;
+    of_mac_addr_t value;
+    of_mac_addr_t value_mask;
+};
+
+struct of_oxm_eth_src : of_oxm {
+    uint32_t type_len == 0x80000806;
+    of_mac_addr_t value;
+};
+
+struct of_oxm_eth_src_masked : of_oxm {
+    uint32_t type_len == 0x8000090c;
+    of_mac_addr_t value;
+    of_mac_addr_t value_mask;
+};
+
+struct of_oxm_eth_type : of_oxm {
+    uint32_t type_len == 0x80000a02;
+    uint16_t value;
+};
+
+struct of_oxm_eth_type_masked : of_oxm {
+    uint32_t type_len == 0x80000b04;
+    uint16_t value;
+    uint16_t value_mask;
+};
+
+struct of_oxm_icmpv4_code : of_oxm {
+    uint32_t type_len == 0x80002801;
+    uint8_t value;
+};
+
+struct of_oxm_icmpv4_code_masked : of_oxm {
+    uint32_t type_len == 0x80002902;
+    uint8_t value;
+    uint8_t value_mask;
+};
+
+struct of_oxm_icmpv4_type : of_oxm {
+    uint32_t type_len == 0x80002601;
+    uint8_t value;
+};
+
+struct of_oxm_icmpv4_type_masked : of_oxm {
+    uint32_t type_len == 0x80002702;
+    uint8_t value;
+    uint8_t value_mask;
+};
+
+struct of_oxm_icmpv6_code : of_oxm {
+    uint32_t type_len == 0x80003c01;
+    uint8_t value;
+};
+
+struct of_oxm_icmpv6_code_masked : of_oxm {
+    uint32_t type_len == 0x80003d02;
+    uint8_t value;
+    uint8_t value_mask;
+};
+
+struct of_oxm_icmpv6_type : of_oxm {
+    uint32_t type_len == 0x80003a01;
+    uint8_t value;
+};
+
+struct of_oxm_icmpv6_type_masked : of_oxm {
+    uint32_t type_len == 0x80003b02;
+    uint8_t value;
+    uint8_t value_mask;
+};
+
+struct of_oxm_in_phy_port : of_oxm {
+    uint32_t type_len == 0x80000204;
+    of_port_no_t value;
+};
+
+struct of_oxm_in_phy_port_masked : of_oxm {
+    uint32_t type_len == 0x80000308;
+    of_port_no_t value;
+    of_port_no_t value_mask;
+};
+
+struct of_oxm_in_port : of_oxm {
+    uint32_t type_len == 0x80000004;
+    of_port_no_t value;
+};
+
+struct of_oxm_in_port_masked : of_oxm {
+    uint32_t type_len == 0x80000108;
+    of_port_no_t value;
+    of_port_no_t value_mask;
+};
+
+struct of_oxm_ip_dscp : of_oxm {
+    uint32_t type_len == 0x80001001;
+    uint8_t value;
+};
+
+struct of_oxm_ip_dscp_masked : of_oxm {
+    uint32_t type_len == 0x80001102;
+    uint8_t value;
+    uint8_t value_mask;
+};
+
+struct of_oxm_ip_ecn : of_oxm {
+    uint32_t type_len == 0x80001201;
+    uint8_t value;
+};
+
+struct of_oxm_ip_ecn_masked : of_oxm {
+    uint32_t type_len == 0x80001302;
+    uint8_t value;
+    uint8_t value_mask;
+};
+
+struct of_oxm_ip_proto : of_oxm {
+    uint32_t type_len == 0x80001401;
+    uint8_t value;
+};
+
+struct of_oxm_ip_proto_masked : of_oxm {
+    uint32_t type_len == 0x80001502;
+    uint8_t value;
+    uint8_t value_mask;
+};
+
+struct of_oxm_ipv4_dst : of_oxm {
+    uint32_t type_len == 0x80001804;
+    of_ipv4_t value;
+};
+
+struct of_oxm_ipv4_dst_masked : of_oxm {
+    uint32_t type_len == 0x80001908;
+    of_ipv4_t value;
+    of_ipv4_t value_mask;
+};
+
+struct of_oxm_ipv4_src : of_oxm {
+    uint32_t type_len == 0x80001604;
+    of_ipv4_t value;
+};
+
+struct of_oxm_ipv4_src_masked : of_oxm {
+    uint32_t type_len == 0x80001708;
+    of_ipv4_t value;
+    of_ipv4_t value_mask;
+};
+
+struct of_oxm_ipv6_dst : of_oxm {
+    uint32_t type_len == 0x80003610;
+    of_ipv6_t value;
+};
+
+struct of_oxm_ipv6_dst_masked : of_oxm {
+    uint32_t type_len == 0x80003720;
+    of_ipv6_t value;
+    of_ipv6_t value_mask;
+};
+
+struct of_oxm_ipv6_flabel : of_oxm {
+    uint32_t type_len == 0x80003804;
+    uint32_t value;
+};
+
+struct of_oxm_ipv6_flabel_masked : of_oxm {
+    uint32_t type_len == 0x80003908;
+    uint32_t value;
+    uint32_t value_mask;
+};
+
+struct of_oxm_ipv6_nd_sll : of_oxm {
+    uint32_t type_len == 0x80004006;
+    of_mac_addr_t value;
+};
+
+struct of_oxm_ipv6_nd_sll_masked : of_oxm {
+    uint32_t type_len == 0x8000410c;
+    of_mac_addr_t value;
+    of_mac_addr_t value_mask;
+};
+
+struct of_oxm_ipv6_nd_target : of_oxm {
+    uint32_t type_len == 0x80003e10;
+    of_ipv6_t value;
+};
+
+struct of_oxm_ipv6_nd_target_masked : of_oxm {
+    uint32_t type_len == 0x80003f20;
+    of_ipv6_t value;
+    of_ipv6_t value_mask;
+};
+
+struct of_oxm_ipv6_nd_tll : of_oxm {
+    uint32_t type_len == 0x80004206;
+    of_mac_addr_t value;
+};
+
+struct of_oxm_ipv6_nd_tll_masked : of_oxm {
+    uint32_t type_len == 0x8000430c;
+    of_mac_addr_t value;
+    of_mac_addr_t value_mask;
+};
+
+struct of_oxm_ipv6_src : of_oxm {
+    uint32_t type_len == 0x80003410;
+    of_ipv6_t value;
+};
+
+struct of_oxm_ipv6_src_masked : of_oxm {
+    uint32_t type_len == 0x80003520;
+    of_ipv6_t value;
+    of_ipv6_t value_mask;
+};
+
+struct of_oxm_metadata : of_oxm {
+    uint32_t type_len == 0x80000408;
+    uint64_t value;
+};
+
+struct of_oxm_metadata_masked : of_oxm {
+    uint32_t type_len == 0x80000510;
+    uint64_t value;
+    uint64_t value_mask;
+};
+
+struct of_oxm_mpls_label : of_oxm {
+    uint32_t type_len == 0x80004404;
+    uint32_t value;
+};
+
+struct of_oxm_mpls_label_masked : of_oxm {
+    uint32_t type_len == 0x80004508;
+    uint32_t value;
+    uint32_t value_mask;
+};
+
+struct of_oxm_mpls_tc : of_oxm {
+    uint32_t type_len == 0x80004601;
+    uint8_t value;
+};
+
+struct of_oxm_mpls_tc_masked : of_oxm {
+    uint32_t type_len == 0x80004702;
+    uint8_t value;
+    uint8_t value_mask;
+};
+
+struct of_oxm_sctp_dst : of_oxm {
+    uint32_t type_len == 0x80002402;
+    uint16_t value;
+};
+
+struct of_oxm_sctp_dst_masked : of_oxm {
+    uint32_t type_len == 0x80002504;
+    uint16_t value;
+    uint16_t value_mask;
+};
+
+struct of_oxm_sctp_src : of_oxm {
+    uint32_t type_len == 0x80002202;
+    uint16_t value;
+};
+
+struct of_oxm_sctp_src_masked : of_oxm {
+    uint32_t type_len == 0x80002304;
+    uint16_t value;
+    uint16_t value_mask;
+};
+
+struct of_oxm_tcp_dst : of_oxm {
+    uint32_t type_len == 0x80001c02;
+    uint16_t value;
+};
+
+struct of_oxm_tcp_dst_masked : of_oxm {
+    uint32_t type_len == 0x80001d04;
+    uint16_t value;
+    uint16_t value_mask;
+};
+
+struct of_oxm_tcp_src : of_oxm {
+    uint32_t type_len == 0x80001a02;
+    uint16_t value;
+};
+
+struct of_oxm_tcp_src_masked : of_oxm {
+    uint32_t type_len == 0x80001b04;
+    uint16_t value;
+    uint16_t value_mask;
+};
+
+struct of_oxm_udp_dst : of_oxm {
+    uint32_t type_len == 0x80002002;
+    uint16_t value;
+};
+
+struct of_oxm_udp_dst_masked : of_oxm {
+    uint32_t type_len == 0x80002104;
+    uint16_t value;
+    uint16_t value_mask;
+};
+
+struct of_oxm_udp_src : of_oxm {
+    uint32_t type_len == 0x80001e02;
+    uint16_t value;
+};
+
+struct of_oxm_udp_src_masked : of_oxm {
+    uint32_t type_len == 0x80001f04;
+    uint16_t value;
+    uint16_t value_mask;
+};
+
+struct of_oxm_vlan_pcp : of_oxm {
+    uint32_t type_len == 0x80000e01;
+    uint8_t value;
+};
+
+struct of_oxm_vlan_pcp_masked : of_oxm {
+    uint32_t type_len == 0x80000f02;
+    uint8_t value;
+    uint8_t value_mask;
+};
+
+struct of_oxm_vlan_vid : of_oxm {
+    uint32_t type_len == 0x80000c02;
+    uint16_t value;
+};
+
+struct of_oxm_vlan_vid_masked : of_oxm {
+    uint32_t type_len == 0x80000d04;
+    uint16_t value;
+    uint16_t value_mask;
+};
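
Because every OXM above carries its payload length in the low byte of type_len, an OXM list can be walked without knowing each field in advance. A minimal Python sketch, assuming the OXMs are simply concatenated as in an OpenFlow 1.2+ match payload:

    import struct

    def walk_oxm_list(buf):
        off = 0
        while off + 4 <= len(buf):
            (type_len,) = struct.unpack_from("!I", buf, off)
            payload_len = type_len & 0xff
            yield type_len, buf[off + 4:off + 4 + payload_len]
            off += 4 + payload_len

    # e.g. an of_oxm_in_port followed by an of_oxm_eth_type:
    oxms = struct.pack("!II", 0x80000004, 3) + struct.pack("!IH", 0x80000a02, 0x0800)
    assert [tl for tl, _ in walk_oxm_list(oxms)] == [0x80000004, 0x80000a02]
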
diff --git a/openflow_input/standard-1.0 b/openflow_input/standard-1.0
index c191b89..1df03bb 100644
--- a/openflow_input/standard-1.0
+++ b/openflow_input/standard-1.0
@@ -35,8 +35,8 @@
     OFP_MAX_TABLE_NAME_LEN = 32,
     OFP_MAX_PORT_NAME_LEN = 16,
 
-    OFP_TCP_PORT = 6633,
-    OFP_SSL_PORT = 6633,
+    OFP_TCP_PORT = 6653,
+    OFP_SSL_PORT = 6653,
 
     OFP_ETH_ALEN = 6,
 
@@ -61,12 +61,12 @@
     OFPQ_MIN_RATE_UNCFG = 0xffff,
 };
 
-enum ofp_type {
+enum ofp_type(wire_type=uint8_t) {
     OFPT_HELLO = 0,
     OFPT_ERROR = 1,
     OFPT_ECHO_REQUEST = 2,
     OFPT_ECHO_REPLY = 3,
-    OFPT_VENDOR = 4,
+    OFPT_EXPERIMENTER = 4,
     OFPT_FEATURES_REQUEST = 5,
     OFPT_FEATURES_REPLY = 6,
     OFPT_GET_CONFIG_REQUEST = 7,
@@ -86,7 +86,7 @@
     OFPT_QUEUE_GET_CONFIG_REPLY = 21,
 };
 
-enum ofp_port_config {
+enum ofp_port_config(wire_type=uint32_t, bitmask=True) {
     OFPPC_PORT_DOWN = 0x1,
     OFPPC_NO_STP = 0x2,
     OFPPC_NO_RECV = 0x4,
@@ -94,18 +94,22 @@
     OFPPC_NO_FLOOD = 0x10,
     OFPPC_NO_FWD = 0x20,
     OFPPC_NO_PACKET_IN = 0x40,
+    OFPPC_BSN_MIRROR_DEST = 0x80000000,
 };
 
-enum ofp_port_state {
-    OFPPS_STP_LISTEN = 0,
+enum ofp_port_state(wire_type=uint32_t, bitmask=True) {
     OFPPS_LINK_DOWN = 1,
+    OFPPS_STP_LISTEN = 0,
     OFPPS_STP_LEARN = 0x100,
     OFPPS_STP_FORWARD = 0x200,
     OFPPS_STP_BLOCK = 0x300,
-    OFPPS_STP_MASK = 0x300,
+    OFPPS_STP_MASK(virtual=True) = 0x300,
 };
 
-enum ofp_port {
+// FIXME: these constants are currently 32 bit due to implementation
+// details of loci, which is in violation of the OpenFlow spec.
+// Should recast to 16 bits and fix/glue the C backend
+enum ofp_port(wire_type=uint16_t, complete=False) {
     OFPP_MAX = 0xffffff00,
     OFPP_IN_PORT = 0xfffffff8,
     OFPP_TABLE = 0xfffffff9,
@@ -117,7 +121,7 @@
     OFPP_NONE = 0xffffffff,
 };
 
-enum ofp_port_features {
+enum ofp_port_features(wire_type=uint32_t, bitmask=True) {
     OFPPF_10MB_HD = 0x1,
     OFPPF_10MB_FD = 0x2,
     OFPPF_100MB_HD = 0x4,
@@ -132,12 +136,12 @@
     OFPPF_PAUSE_ASYM = 0x800,
 };
 
-enum ofp_queue_properties {
+enum ofp_queue_properties(wire_type=uint32_t) {
     OFPQT_NONE = 0,
     OFPQT_MIN_RATE = 1,
 };
 
-enum ofp_flow_wildcards {
+enum ofp_flow_wildcards(wire_type=uint32_t, bitmask=True) {
     OFPFW_IN_PORT = 0x1,
     OFPFW_DL_VLAN = 0x2,
     OFPFW_DL_SRC = 0x4,
@@ -156,10 +160,10 @@
     OFPFW_NW_DST_MASK = 0xfc000,
     OFPFW_DL_VLAN_PCP = 0x100000,
     OFPFW_NW_TOS = 0x200000,
-    OFPFW_ALL = 0x3fffff,
+    OFPFW_ALL(virtual=True) = 0x3fffff,
 };
 
-enum ofp_action_type {
+enum ofp_action_type(wire_type=uint16_t) {
     OFPAT_OUTPUT = 0,
     OFPAT_SET_VLAN_VID = 1,
     OFPAT_SET_VLAN_PCP = 2,
@@ -172,10 +176,10 @@
     OFPAT_SET_TP_SRC = 9,
     OFPAT_SET_TP_DST = 10,
     OFPAT_ENQUEUE = 11,
-    OFPAT_VENDOR = 0xffff,
+    OFPAT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_capabilities {
+enum ofp_capabilities(wire_type=uint32_t, bitmask=True) {
     OFPC_FLOW_STATS = 0x1,
     OFPC_TABLE_STATS = 0x2,
     OFPC_PORT_STATS = 0x4,
@@ -186,14 +190,14 @@
     OFPC_ARP_MATCH_IP = 0x80,
 };
 
-enum ofp_config_flags {
+enum ofp_config_flags(wire_type=uint16_t, bitmask=True) {
     OFPC_FRAG_NORMAL = 0x0,
     OFPC_FRAG_DROP = 0x1,
     OFPC_FRAG_REASM = 0x2,
     OFPC_FRAG_MASK = 0x3,
 };
 
-enum ofp_flow_mod_command {
+enum ofp_flow_mod_command(wire_type=uint16_t) {
     OFPFC_ADD = 0,
     OFPFC_MODIFY = 1,
     OFPFC_MODIFY_STRICT = 2,
@@ -201,44 +205,48 @@
     OFPFC_DELETE_STRICT = 4,
 };
 
-enum ofp_flow_mod_flags {
+enum ofp_flow_mod_flags(wire_type=uint16_t, bitmask=True) {
     OFPFF_SEND_FLOW_REM = 0x1,
     OFPFF_CHECK_OVERLAP = 0x2,
     OFPFF_EMERG = 0x4,
 };
 
-enum ofp_stats_reply_flags {
+enum ofp_stats_request_flags(wire_type=uint16_t, bitmask=True) {
+};
+
+
+enum ofp_stats_reply_flags(wire_type=uint16_t, bitmask=True) {
     OFPSF_REPLY_MORE = 0x1,
 };
 
-enum ofp_stats_types {
+enum ofp_stats_type(wire_type=uint16_t) {
     OFPST_DESC = 0,
     OFPST_FLOW = 1,
     OFPST_AGGREGATE = 2,
     OFPST_TABLE = 3,
     OFPST_PORT = 4,
     OFPST_QUEUE = 5,
-    OFPST_VENDOR = 0xffff,
+    OFPST_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_packet_in_reason {
+enum ofp_packet_in_reason(wire_type=uint8_t) {
     OFPR_NO_MATCH = 0,
     OFPR_ACTION = 1,
 };
 
-enum ofp_flow_removed_reason {
+enum ofp_flow_removed_reason(wire_type=uint8_t) {
     OFPRR_IDLE_TIMEOUT = 0,
     OFPRR_HARD_TIMEOUT = 1,
     OFPRR_DELETE = 2,
 };
 
-enum ofp_port_reason {
+enum ofp_port_reason(wire_type=uint8_t) {
     OFPPR_ADD = 0,
     OFPPR_DELETE = 1,
     OFPPR_MODIFY = 2,
 };
 
-enum ofp_error_type {
+enum ofp_error_type(wire_type=uint16_t) {
     OFPET_HELLO_FAILED = 0,
     OFPET_BAD_REQUEST = 1,
     OFPET_BAD_ACTION = 2,
@@ -247,16 +255,16 @@
     OFPET_QUEUE_OP_FAILED = 5,
 };
 
-enum ofp_hello_failed_code {
+enum ofp_hello_failed_code(wire_type=uint16_t) {
     OFPHFC_INCOMPATIBLE = 0,
     OFPHFC_EPERM = 1,
 };
 
-enum ofp_bad_request_code {
+enum ofp_bad_request_code(wire_type=uint16_t) {
     OFPBRC_BAD_VERSION = 0,
     OFPBRC_BAD_TYPE = 1,
     OFPBRC_BAD_STAT = 2,
-    OFPBRC_BAD_VENDOR = 3,
+    OFPBRC_BAD_EXPERIMENTER = 3,
     OFPBRC_BAD_SUBTYPE = 4,
     OFPBRC_EPERM = 5,
     OFPBRC_BAD_LEN = 6,
@@ -264,11 +272,11 @@
     OFPBRC_BUFFER_UNKNOWN = 8,
 };
 
-enum ofp_bad_action_code {
+enum ofp_bad_action_code(wire_type=uint16_t) {
     OFPBAC_BAD_TYPE = 0,
     OFPBAC_BAD_LEN = 1,
-    OFPBAC_BAD_VENDOR = 2,
-    OFPBAC_BAD_VENDOR_TYPE = 3,
+    OFPBAC_BAD_EXPERIMENTER = 2,
+    OFPBAC_BAD_EXPERIMENTER_TYPE = 3,
     OFPBAC_BAD_OUT_PORT = 4,
     OFPBAC_BAD_ARGUMENT = 5,
     OFPBAC_EPERM = 6,
@@ -276,7 +284,7 @@
     OFPBAC_BAD_QUEUE = 8,
 };
 
-enum ofp_flow_mod_failed_code {
+enum ofp_flow_mod_failed_code(wire_type=uint16_t) {
     OFPFMFC_ALL_TABLES_FULL = 0,
     OFPFMFC_OVERLAP = 1,
     OFPFMFC_EPERM = 2,
@@ -285,142 +293,143 @@
     OFPFMFC_UNSUPPORTED = 5,
 };
 
-enum ofp_port_mod_failed_code {
+enum ofp_port_mod_failed_code(wire_type=uint16_t) {
     OFPPMFC_BAD_PORT = 0,
     OFPPMFC_BAD_HW_ADDR = 1,
 };
 
-enum ofp_queue_op_failed_code {
+enum ofp_queue_op_failed_code(wire_type=uint16_t) {
     OFPQOFC_BAD_PORT = 0,
     OFPQOFC_BAD_QUEUE = 1,
     OFPQOFC_EPERM = 2,
 };
 
-struct ofp_header {
+/* XXX rename to of_message */
+struct of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == ?;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_hello {
+struct of_hello : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 0;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_echo_request {
+struct of_echo_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 2;
     uint16_t length;
     uint32_t xid;
     of_octets_t data;
 };
 
-struct ofp_echo_reply {
+struct of_echo_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 3;
     uint16_t length;
     uint32_t xid;
     of_octets_t data;
 };
 
-struct ofp_experimenter {
+struct of_experimenter : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     uint32_t subtype;
     of_octets_t data;
 };
 
-struct ofp_barrier_request {
+struct of_barrier_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_barrier_reply {
+struct of_barrier_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_get_config_request {
+struct of_get_config_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 7;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_get_config_reply {
+struct of_get_config_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 8;
     uint16_t length;
     uint32_t xid;
-    uint16_t flags;
+    enum ofp_config_flags flags;
     uint16_t miss_send_len;
 };
 
-struct ofp_set_config {
+struct of_set_config : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 9;
     uint16_t length;
     uint32_t xid;
-    uint16_t flags;
+    enum ofp_config_flags flags;
     uint16_t miss_send_len;
 };
 
-struct ofp_port_desc {
+struct of_port_desc {
     of_port_no_t port_no;
     of_mac_addr_t hw_addr;
     of_port_name_t name;
-    uint32_t config;
-    uint32_t state;
-    uint32_t curr;
-    uint32_t advertised;
-    uint32_t supported;
-    uint32_t peer;
+    enum ofp_port_config config;
+    enum ofp_port_state state;
+    enum ofp_port_features curr;
+    enum ofp_port_features advertised;
+    enum ofp_port_features supported;
+    enum ofp_port_features peer;
 };
 
-struct ofp_features_request {
+struct of_features_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 5;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_features_reply {
+struct of_features_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 6;
     uint16_t length;
     uint32_t xid;
     uint64_t datapath_id;
     uint32_t n_buffers;
     uint8_t n_tables;
-    uint8_t[3] pad;
-    uint32_t capabilities;
+    pad(3);
+    enum ofp_capabilities capabilities;
     uint32_t actions;
     list(of_port_desc_t) ports;
 };
 
-struct ofp_port_status {
+struct of_port_status : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 12;
     uint16_t length;
     uint32_t xid;
-    uint8_t reason;
-    uint8_t[7] pad;
+    enum ofp_port_reason reason;
+    pad(7);
     of_port_desc_t desc;
 };
 
-struct ofp_port_mod {
+struct of_port_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 15;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port_no;
@@ -428,120 +437,120 @@
     uint32_t config;
     uint32_t mask;
     uint32_t advertise;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_packet_in {
+struct of_packet_in : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 10;
     uint16_t length;
     uint32_t xid;
     uint32_t buffer_id;
     uint16_t total_len;
     of_port_no_t in_port;
     uint8_t reason;
-    uint8_t pad;
+    pad(1);
     of_octets_t data;
 };
 
-struct ofp_action_output {
-    uint16_t type;
+struct of_action_output : of_action {
+    uint16_t type == 0;
     uint16_t len;
     of_port_no_t port;
     uint16_t max_len;
 };
 
-struct ofp_action_set_vlan_vid {
-    uint16_t type;
+struct of_action_set_vlan_vid : of_action {
+    uint16_t type == 1;
     uint16_t len;
     uint16_t vlan_vid;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_strip_vlan {
-    uint16_t type;
+struct of_action_strip_vlan : of_action {
+    uint16_t type == 3;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_set_vlan_pcp {
-    uint16_t type;
+struct of_action_set_vlan_pcp : of_action {
+    uint16_t type == 2;
     uint16_t len;
     uint8_t vlan_pcp;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_set_dl_src {
-    uint16_t type;
+struct of_action_set_dl_src : of_action {
+    uint16_t type == 4;
     uint16_t len;
     of_mac_addr_t dl_addr;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_action_set_dl_dst {
-    uint16_t type;
+struct of_action_set_dl_dst : of_action {
+    uint16_t type == 5;
     uint16_t len;
     of_mac_addr_t dl_addr;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_action_set_nw_src {
-    uint16_t type;
+struct of_action_set_nw_src : of_action {
+    uint16_t type == 6;
     uint16_t len;
     uint32_t nw_addr;
 };
 
-struct ofp_action_set_nw_dst {
-    uint16_t type;
+struct of_action_set_nw_dst : of_action {
+    uint16_t type == 7;
     uint16_t len;
     uint32_t nw_addr;
 };
 
-struct ofp_action_set_tp_src {
-    uint16_t type;
+struct of_action_set_tp_src : of_action {
+    uint16_t type == 9;
     uint16_t len;
     uint16_t tp_port;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_set_tp_dst {
-    uint16_t type;
+struct of_action_set_tp_dst : of_action {
+    uint16_t type == 10;
     uint16_t len;
     uint16_t tp_port;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_set_nw_tos {
-    uint16_t type;
+struct of_action_set_nw_tos : of_action {
+    uint16_t type == 8;
     uint16_t len;
     uint8_t nw_tos;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_experimenter {
-    uint16_t type;
+struct of_action_experimenter : of_action {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     of_octets_t data;
 };
 
-struct ofp_action_enqueue {
-    uint16_t type;
+struct of_action_enqueue : of_action {
+    uint16_t type == 11;
     uint16_t len;
     of_port_no_t port;
-    uint8_t[6] pad;
+    pad(6);
     uint32_t queue_id;
 };
 
-struct ofp_action {
-    uint16_t type;
+struct of_action {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_packet_out {
+struct of_packet_out : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 13;
     uint16_t length;
     uint32_t xid;
     uint32_t buffer_id;
@@ -551,158 +560,233 @@
     of_octets_t data;
 };
 
-struct ofp_match_v1 {
+struct of_match_v1 {
     of_wc_bmap_t wildcards;
     of_port_no_t in_port;
     of_mac_addr_t eth_src;
     of_mac_addr_t eth_dst;
     uint16_t vlan_vid;
     uint8_t vlan_pcp;
-    uint8_t[1] pad1;
+    pad(1);
     uint16_t eth_type;
     uint8_t ip_dscp;
     uint8_t ip_proto;
-    uint8_t[2] pad2;
-    uint32_t ipv4_src;
-    uint32_t ipv4_dst;
+    pad(2);
+    of_ipv4_t ipv4_src;
+    of_ipv4_t ipv4_dst;
     uint16_t tcp_src;
     uint16_t tcp_dst;
 };
 
-struct ofp_flow_add {
+struct of_flow_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     of_match_t match;
     uint64_t cookie;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == ?;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
-    uint16_t flags;
+    enum ofp_flow_mod_flags flags;
     list(of_action_t) actions;
 };
 
-struct ofp_flow_modify {
+struct of_flow_add : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     of_match_t match;
     uint64_t cookie;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 0;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
-    uint16_t flags;
+    enum ofp_flow_mod_flags flags;
     list(of_action_t) actions;
 };
 
-struct ofp_flow_modify_strict {
+struct of_flow_modify : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     of_match_t match;
     uint64_t cookie;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 1;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
-    uint16_t flags;
+    enum ofp_flow_mod_flags flags;
     list(of_action_t) actions;
 };
 
-struct ofp_flow_delete {
+struct of_flow_modify_strict : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     of_match_t match;
     uint64_t cookie;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 2;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
-    uint16_t flags;
+    enum ofp_flow_mod_flags flags;
     list(of_action_t) actions;
 };
 
-struct ofp_flow_delete_strict {
+struct of_flow_delete : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     of_match_t match;
     uint64_t cookie;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 3;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
-    uint16_t flags;
+    enum ofp_flow_mod_flags flags;
     list(of_action_t) actions;
 };
 
-struct ofp_flow_removed {
+struct of_flow_delete_strict : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
+    uint16_t length;
+    uint32_t xid;
+    of_match_t match;
+    uint64_t cookie;
+    of_fm_cmd_t _command == 4;
+    uint16_t idle_timeout;
+    uint16_t hard_timeout;
+    uint16_t priority;
+    uint32_t buffer_id;
+    of_port_no_t out_port;
+    enum ofp_flow_mod_flags flags;
+    list(of_action_t) actions;
+};
+
+struct of_flow_removed : of_header {
+    uint8_t version;
+    uint8_t type == 11;
     uint16_t length;
     uint32_t xid;
     of_match_t match;
     uint64_t cookie;
     uint16_t priority;
     uint8_t reason;
-    uint8_t[1] pad;
+    pad(1);
     uint32_t duration_sec;
     uint32_t duration_nsec;
     uint16_t idle_timeout;
-    uint8_t[2] pad2;
+    pad(2);
     uint64_t packet_count;
     uint64_t byte_count;
 };
 
-struct ofp_error_msg {
+struct of_error_msg : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 1;
     uint16_t length;
     uint32_t xid;
-    uint16_t err_type;
-    uint16_t code;
+    uint16_t err_type == ?;
+};
+
+struct of_hello_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 0;
+    enum ofp_hello_failed_code code;
+    of_octets_t data;
+};
+
+struct of_bad_request_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 1;
+    enum ofp_bad_request_code code;
+    of_octets_t data;
+};
+
+struct of_bad_action_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 2;
+    enum ofp_bad_action_code code;
+    of_octets_t data;
+};
+
+struct of_flow_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 3;
+    enum ofp_flow_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_port_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 4;
+    enum ofp_port_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_queue_op_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 5;
+    enum ofp_queue_op_failed_code code;
     of_octets_t data;
 };
 
 // STATS ENTRIES: flow, table, port, queue,
-struct ofp_flow_stats_entry {
+struct of_flow_stats_entry {
     uint16_t length;
     uint8_t table_id;
-    uint8_t pad;
+    pad(1);
     of_match_t match;
     uint32_t duration_sec;
     uint32_t duration_nsec;
     uint16_t priority;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
-    uint8_t[6] pad2;
+    pad(6);
     uint64_t cookie;
     uint64_t packet_count;
     uint64_t byte_count;
     list(of_action_t) actions;
 };
 
-struct ofp_table_stats_entry {
+struct of_table_stats_entry {
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     of_table_name_t name;
     of_wc_bmap_t wildcards;
     uint32_t max_entries;
@@ -711,9 +795,9 @@
     uint64_t matched_count;
 };
 
-struct ofp_port_stats_entry {
+struct of_port_stats_entry {
     of_port_no_t port_no;
-    uint8_t[6] pad;
+    pad(6);
     uint64_t rx_packets;
     uint64_t tx_packets;
     uint64_t rx_bytes;
@@ -728,9 +812,9 @@
     uint64_t collisions;
 };
 
-struct ofp_queue_stats_entry {
+struct of_queue_stats_entry {
     of_port_no_t port_no;
-    uint8_t[2] pad;
+    pad(2);
     uint32_t queue_id;
     uint64_t tx_bytes;
     uint64_t tx_packets;
@@ -739,22 +823,40 @@
 
 // STATS request/reply:  Desc, flow, agg, table, port, queue
 
-struct ofp_desc_stats_request {
+struct of_stats_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == ?;
+    enum ofp_stats_request_flags flags;
 };
 
-struct ofp_desc_stats_reply {
+struct of_stats_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == ?;
+    enum ofp_stats_reply_flags flags;
+};
+
+struct of_desc_stats_request : of_stats_request {
+    uint8_t version;
+    uint8_t type == 16;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0;
+    enum ofp_stats_request_flags flags;
+};
+
+struct of_desc_stats_reply : of_stats_reply {
+    uint8_t version;
+    uint8_t type == 17;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0;
+    enum ofp_stats_reply_flags flags;
     of_desc_str_t mfr_desc;
     of_desc_str_t hw_desc;
     of_desc_str_t sw_desc;
@@ -762,177 +864,177 @@
     of_desc_str_t dp_desc;
 };
 
-struct ofp_flow_stats_request {
+struct of_flow_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 1;
+    enum ofp_stats_request_flags flags;
     of_match_t match;
     uint8_t table_id;
-    uint8_t pad;
+    pad(1);
     of_port_no_t out_port;
 };
 
-struct ofp_flow_stats_reply {
+struct of_flow_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 1;
+    enum ofp_stats_reply_flags flags;
     list(of_flow_stats_entry_t) entries;
 };
 
-struct ofp_aggregate_stats_request {
+struct of_aggregate_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 2;
+    enum ofp_stats_request_flags flags;
     of_match_t match;
     uint8_t table_id;
-    uint8_t pad;
+    pad(1);
     of_port_no_t out_port;
 };
 
-struct ofp_aggregate_stats_reply {
+struct of_aggregate_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 2;
+    enum ofp_stats_reply_flags flags;
     uint64_t packet_count;
     uint64_t byte_count;
     uint32_t flow_count;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_table_stats_request {
+struct of_table_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 3;
+    enum ofp_stats_request_flags flags;
 };
 
-struct ofp_table_stats_reply {
+struct of_table_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 3;
+    enum ofp_stats_reply_flags flags;
     list(of_table_stats_entry_t) entries;
 };
 
-struct ofp_port_stats_request {
+struct of_port_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 4;
+    enum ofp_stats_request_flags flags;
     of_port_no_t port_no;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_port_stats_reply {
+struct of_port_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 4;
+    enum ofp_stats_reply_flags flags;
     list(of_port_stats_entry_t) entries;
 };
 
-struct ofp_queue_stats_request {
+struct of_queue_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 5;
+    enum ofp_stats_request_flags flags;
     of_port_no_t port_no;
-    uint8_t[2] pad;
+    pad(2);
     uint32_t queue_id;
 };
 
-struct ofp_queue_stats_reply {
+struct of_queue_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
+    uint16_t stats_type == 5;
+    enum ofp_stats_reply_flags flags;
     list(of_queue_stats_entry_t) entries;
 };
 
-struct ofp_experimenter_stats_request {
+struct of_experimenter_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint32_t experimenter;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    uint32_t experimenter == ?;
     of_octets_t data;
 };
 
-struct ofp_experimenter_stats_reply {
+struct of_experimenter_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint32_t experimenter;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    uint32_t experimenter == ?;
     of_octets_t data;
 };
 
 // END OF STATS OBJECTS
 
-struct ofp_queue_prop {
-    uint16_t type;
+struct of_queue_prop {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_queue_prop_min_rate {
-    uint16_t type;
+struct of_queue_prop_min_rate : of_queue_prop {
+    uint16_t type == 1;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     uint16_t rate;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_packet_queue {
+struct of_packet_queue {
     uint32_t queue_id;
     uint16_t len;
-    uint8_t[2] pad;
+    pad(2);
     list(of_queue_prop_t) properties;
 };
 
-struct ofp_queue_get_config_request {
+struct of_queue_get_config_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 20;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_queue_get_config_reply {
+struct of_queue_get_config_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 21;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port;
-    uint8_t[6] pad;
+    pad(6);
     list(of_packet_queue_t) queues;
 };
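
With every message in this file now derived from of_header and carrying a fixed type == N discriminator, a reader only needs the common 8-byte header to frame and dispatch messages. A Python sketch of reading one message from a blocking byte stream; the stream object and the dispatch comment are illustrative, not part of the generated API:

    import struct

    OF_HEADER = struct.Struct("!BBHI")       # version, type, length, xid

    def read_exactly(stream, n):
        data = b""
        while len(data) < n:
            chunk = stream.read(n - len(data))
            if not chunk:
                raise EOFError("connection closed mid-message")
            data += chunk
        return data

    def read_openflow_message(stream):
        header = read_exactly(stream, OF_HEADER.size)
        version, msg_type, length, xid = OF_HEADER.unpack(header)
        body = read_exactly(stream, length - OF_HEADER.size)
        return version, msg_type, xid, header + body

    # msg_type then selects the concrete struct, e.g. 0 -> of_hello, 1 -> of_error_msg,
    # 10 -> of_packet_in, 16/17 -> the stats request/reply families defined above.
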
diff --git a/openflow_input/standard-1.1 b/openflow_input/standard-1.1
index 1c551d2..a9aac6b 100644
--- a/openflow_input/standard-1.1
+++ b/openflow_input/standard-1.1
@@ -35,8 +35,8 @@
     OFP_MAX_TABLE_NAME_LEN = 32,
     OFP_MAX_PORT_NAME_LEN = 16,
 
-    OFP_TCP_PORT = 6633,
-    OFP_SSL_PORT = 6633,
+    OFP_TCP_PORT = 6653,
+    OFP_SSL_PORT = 6653,
 
     OFP_ETH_ALEN = 6,
 
@@ -63,7 +63,7 @@
     OFPQ_MIN_RATE_UNCFG = 0xffff,
 };
 
-enum ofp_port {
+enum ofp_port(wire_type=uint32_t) {
     OFPP_MAX = 0xffffff00,
     OFPP_IN_PORT = 0xfffffff8,
     OFPP_TABLE = 0xfffffff9,
@@ -72,13 +72,10 @@
     OFPP_ALL = 0xfffffffc,
     OFPP_CONTROLLER = 0xfffffffd,
     OFPP_LOCAL = 0xfffffffe,
-};
-
-enum ofp_port_no {
     OFPP_ANY = 0xffffffff,
 };
 
-enum ofp_type {
+enum ofp_type(wire_type=uint8_t) {
     OFPT_HELLO = 0,
     OFPT_ERROR = 1,
     OFPT_ECHO_REQUEST = 2,
@@ -105,22 +102,22 @@
     OFPT_QUEUE_GET_CONFIG_REPLY = 23,
 };
 
-enum ofp_config_flags {
+enum ofp_config_flags(wire_type=uint16_t, bitmask=True) {
     OFPC_FRAG_NORMAL = 0,
     OFPC_FRAG_DROP = 1,
     OFPC_FRAG_REASM = 2,
-    OFPC_FRAG_MASK = 3,
+    OFPC_FRAG_MASK(virtual=True) = 3,
     OFPC_INVALID_TTL_TO_CONTROLLER = 4,
 };
 
-enum ofp_table_config {
+enum ofp_table_config(wire_type=uint32_t, bitmask=True) {
     OFPTC_TABLE_MISS_CONTROLLER = 0,
     OFPTC_TABLE_MISS_CONTINUE = 1,
     OFPTC_TABLE_MISS_DROP = 2,
-    OFPTC_TABLE_MISS_MASK = 3,
+    OFPTC_TABLE_MISS_MASK(virtual=True) = 3,
 };
 
-enum ofp_capabilities {
+enum ofp_capabilities(wire_type=uint32_t, bitmask=True) {
     OFPC_FLOW_STATS = 0x1,
     OFPC_TABLE_STATS = 0x2,
     OFPC_PORT_STATS = 0x4,
@@ -130,20 +127,21 @@
     OFPC_ARP_MATCH_IP = 0x80,
 };
 
-enum ofp_port_config {
+enum ofp_port_config(wire_type=uint32_t, bitmask=True) {
     OFPPC_PORT_DOWN = 0x1,
     OFPPC_NO_RECV = 0x4,
     OFPPC_NO_FWD = 0x20,
     OFPPC_NO_PACKET_IN = 0x40,
+    OFPPC_BSN_MIRROR_DEST = 0x80000000,
 };
 
-enum ofp_port_state {
+enum ofp_port_state(wire_type=uint32_t, bitmask=True) {
     OFPPS_LINK_DOWN = 0x1,
     OFPPS_BLOCKED = 0x2,
     OFPPS_LIVE = 0x4,
 };
 
-enum ofp_port_features {
+enum ofp_port_features(wire_type=uint32_t, bitmask=True) {
     OFPPF_10MB_HD = 0x1,
     OFPPF_10MB_FD = 0x2,
     OFPPF_100MB_HD = 0x4,
@@ -162,18 +160,18 @@
     OFPPF_PAUSE_ASYM = 0x8000,
 };
 
-enum ofp_port_reason {
+enum ofp_port_reason(wire_type=uint8_t) {
     OFPPR_ADD = 0,
     OFPPR_DELETE = 1,
     OFPPR_MODIFY = 2,
 };
 
-enum ofp_packet_in_reason {
+enum ofp_packet_in_reason(wire_type=uint8_t) {
     OFPR_NO_MATCH = 0,
     OFPR_ACTION = 1,
 };
 
-enum ofp_action_type {
+enum ofp_action_type(wire_type=uint16_t) {
     OFPAT_OUTPUT = 0,
     OFPAT_SET_VLAN_VID = 1,
     OFPAT_SET_VLAN_PCP = 2,
@@ -202,7 +200,7 @@
     OFPAT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_flow_mod_command {
+enum ofp_flow_mod_command(wire_type=uint8_t) {
     OFPFC_ADD = 0,
     OFPFC_MODIFY = 1,
     OFPFC_MODIFY_STRICT = 2,
@@ -210,13 +208,13 @@
     OFPFC_DELETE_STRICT = 4,
 };
 
-enum ofp_group_mod_command {
+enum ofp_group_mod_command(wire_type=uint16_t) {
     OFPGC_ADD = 0,
     OFPGC_MODIFY = 1,
     OFPGC_DELETE = 2,
 };
 
-enum ofp_flow_wildcards {
+enum ofp_flow_wildcards(wire_type=uint32_t, bitmask=True) {
     OFPFW_IN_PORT = 0x1,
     OFPFW_DL_VLAN = 0x2,
     OFPFW_DL_VLAN_PCP = 0x4,
@@ -227,19 +225,19 @@
     OFPFW_TP_DST = 0x80,
     OFPFW_MPLS_LABEL = 0x100,
     OFPFW_MPLS_TC = 0x200,
-    OFPFW_ALL = 0x3ff,
+    OFPFW_ALL(virtual=True) = 0x3ff,
 };
 
-enum ofp_vlan_id {
+enum ofp_vlan_id(wire_type=uint16_t) {
     OFPVID_ANY = 0xfffe,
     OFPVID_NONE = 0xffff,
 };
 
-enum ofp_match_type {
+enum ofp_match_type(wire_type=uint16_t) {
     OFPMT_STANDARD = 0,
 };
 
-enum ofp_instruction_type {
+enum ofp_instruction_type(wire_type=uint16_t, bitmask=True) {
     OFPIT_GOTO_TABLE = 0x1,
     OFPIT_WRITE_METADATA = 0x2,
     OFPIT_WRITE_ACTIONS = 0x3,
@@ -248,32 +246,32 @@
     OFPIT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_flow_mod_flags {
+enum ofp_flow_mod_flags(wire_type=uint16_t, bitmask=True) {
     OFPFF_SEND_FLOW_REM = 0x1,
     OFPFF_CHECK_OVERLAP = 0x2,
 };
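
The `bitmask=True` annotation marks enums like ofp_flow_mod_flags whose members are OR-ed together into a single field of the stated wire_type; members tagged `virtual=True` (such as OFPC_FRAG_MASK above) appear to be masks over other members rather than standalone wire values. A small Python sketch of the flag encoding, using the values just above (illustrative only, not generated code):

import struct

OFPFF_SEND_FLOW_REM = 0x1
OFPFF_CHECK_OVERLAP = 0x2

def pack_flow_mod_flags(flags):
    """OR a set of flag values into the uint16 wire field (wire_type=uint16_t)."""
    value = 0
    for flag in flags:
        value |= flag
    return struct.pack("!H", value)

assert pack_flow_mod_flags({OFPFF_SEND_FLOW_REM, OFPFF_CHECK_OVERLAP}) == b"\x00\x03"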
 
-enum ofp_group {
+enum ofp_group(wire_type=uint32_t, complete=False) {
     OFPG_MAX = 0xffffff00,
     OFPG_ALL = 0xfffffffc,
     OFPG_ANY = 0xffffffff,
 };
 
-enum ofp_group_type {
+enum ofp_group_type(wire_type=uint8_t) {
     OFPGT_ALL = 0,
     OFPGT_SELECT = 1,
     OFPGT_INDIRECT = 2,
     OFPGT_FF = 3,
 };
 
-enum ofp_flow_removed_reason {
+enum ofp_flow_removed_reason(wire_type=uint8_t) {
     OFPRR_IDLE_TIMEOUT = 0,
     OFPRR_HARD_TIMEOUT = 1,
     OFPRR_DELETE = 2,
     OFPRR_GROUP_DELETE = 3,
 };
 
-enum ofp_error_type {
+enum ofp_error_type(wire_type=uint16_t) {
     OFPET_HELLO_FAILED = 0,
     OFPET_BAD_REQUEST = 1,
     OFPET_BAD_ACTION = 2,
@@ -287,12 +285,12 @@
     OFPET_SWITCH_CONFIG_FAILED = 10,
 };
 
-enum ofp_hello_failed_code {
+enum ofp_hello_failed_code(wire_type=uint16_t) {
     OFPHFC_INCOMPATIBLE = 0,
     OFPHFC_EPERM = 1,
 };
 
-enum ofp_bad_request_code {
+enum ofp_bad_request_code(wire_type=uint16_t) {
     OFPBRC_BAD_VERSION = 0,
     OFPBRC_BAD_TYPE = 1,
     OFPBRC_BAD_STAT = 2,
@@ -305,7 +303,7 @@
     OFPBRC_BAD_TABLE_ID = 9,
 };
 
-enum ofp_bad_action_code {
+enum ofp_bad_action_code(wire_type=uint16_t) {
     OFPBAC_BAD_TYPE = 0,
     OFPBAC_BAD_LEN = 1,
     OFPBAC_BAD_EXPERIMENTER = 2,
@@ -321,7 +319,7 @@
     OFPBAC_BAD_TAG = 12,
 };
 
-enum ofp_bad_instruction_code {
+enum ofp_bad_instruction_code(wire_type=uint16_t) {
     OFPBIC_UNKNOWN_INST = 0,
     OFPBIC_UNSUP_INST = 1,
     OFPBIC_BAD_TABLE_ID = 2,
@@ -330,7 +328,7 @@
     OFPBIC_UNSUP_EXP_INST = 5,
 };
 
-enum ofp_bad_match_code {
+enum ofp_bad_match_code(wire_type=uint16_t) {
     OFPBMC_BAD_TYPE = 0,
     OFPBMC_BAD_LEN = 1,
     OFPBMC_BAD_TAG = 2,
@@ -341,7 +339,7 @@
     OFPBMC_BAD_VALUE = 7,
 };
 
-enum ofp_flow_mod_failed_code {
+enum ofp_flow_mod_failed_code(wire_type=uint16_t) {
     OFPFMFC_UNKNOWN = 0,
     OFPFMFC_TABLE_FULL = 1,
     OFPFMFC_BAD_TABLE_ID = 2,
@@ -351,7 +349,7 @@
     OFPFMFC_BAD_COMMAND = 6,
 };
 
-enum ofp_group_mod_failed_code {
+enum ofp_group_mod_failed_code(wire_type=uint16_t) {
     OFPGMFC_GROUP_EXISTS = 0,
     OFPGMFC_INVALID_GROUP = 1,
     OFPGMFC_WEIGHT_UNSUPPORTED = 2,
@@ -363,30 +361,30 @@
     OFPGMFC_UNKNOWN_GROUP = 8,
 };
 
-enum ofp_port_mod_failed_code {
+enum ofp_port_mod_failed_code(wire_type=uint16_t) {
     OFPPMFC_BAD_PORT = 0,
     OFPPMFC_BAD_HW_ADDR = 1,
     OFPPMFC_BAD_CONFIG = 2,
     OFPPMFC_BAD_ADVERTISE = 3,
 };
 
-enum ofp_table_mod_failed_code {
+enum ofp_table_mod_failed_code(wire_type=uint16_t) {
     OFPTMFC_BAD_TABLE = 0,
     OFPTMFC_BAD_CONFIG = 1,
 };
 
-enum ofp_queue_op_failed_code {
+enum ofp_queue_op_failed_code(wire_type=uint16_t) {
     OFPQOFC_BAD_PORT = 0,
     OFPQOFC_BAD_QUEUE = 1,
     OFPQOFC_EPERM = 2,
 };
 
-enum ofp_switch_config_failed_code {
+enum ofp_switch_config_failed_code(wire_type=uint16_t) {
     OFPSCFC_BAD_FLAGS = 0,
     OFPSCFC_BAD_LEN = 1,
 };
 
-enum ofp_stats_types {
+enum ofp_stats_type(wire_type=uint16_t) {
     OFPST_DESC = 0,
     OFPST_FLOW = 1,
     OFPST_AGGREGATE = 2,
@@ -398,169 +396,175 @@
     OFPST_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_stats_reply_flags {
+// none defined
+enum ofp_stats_request_flags(wire_type=uint16_t, bitmask=True) {
+};
+
+
+enum ofp_stats_reply_flags(wire_type=uint16_t, bitmask=True) {
     OFPSF_REPLY_MORE = 0x1,
 };
 
-enum ofp_queue_properties {
+enum ofp_queue_properties(wire_type=uint16_t) {
     OFPQT_NONE = 0,
     OFPQT_MIN_RATE = 1,
 };
 
-struct ofp_header {
+/* XXX rename to of_message */
+struct of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == ?;
     uint16_t length;
     uint32_t xid;
 };
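
Every message below shares the four header fields above, and the new `type == ?` marker turns the type byte into a discriminator that each concrete message pins to a constant (hello == 0, error == 1, and so on, matching ofp_type). A hedged sketch of the 8-byte framing and discriminator, using only the Python standard library (helper names are invented, not the generated classes):

import struct

OFP_HEADER = struct.Struct("!BBHI")  # version, type, length, xid

def parse_header(buf):
    """Return (version, type, length, xid) from the first 8 bytes of a message."""
    return OFP_HEADER.unpack_from(buf)

# An OFPT_HELLO (type == 0) for OpenFlow 1.1 (wire version 2) has no body:
hello = OFP_HEADER.pack(2, 0, 8, 0x12345678)
assert parse_header(hello) == (2, 0, 8, 0x12345678)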
 
-struct ofp_hello {
+struct of_hello : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 0;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_echo_request {
+struct of_echo_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 2;
     uint16_t length;
     uint32_t xid;
     of_octets_t data;
 };
 
-struct ofp_echo_reply {
+struct of_echo_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 3;
     uint16_t length;
     uint32_t xid;
     of_octets_t data;
 };
 
-struct ofp_experimenter {
+struct of_experimenter : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     uint32_t subtype;
     of_octets_t data;
 };
 
-struct ofp_barrier_request {
+struct of_barrier_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 20;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_barrier_reply {
+struct of_barrier_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 21;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_get_config_request {
+struct of_get_config_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 7;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_get_config_reply {
+struct of_get_config_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 8;
     uint16_t length;
     uint32_t xid;
-    uint16_t flags;
+    enum ofp_config_flags flags;
     uint16_t miss_send_len;
 };
 
-struct ofp_set_config {
+struct of_set_config : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 9;
     uint16_t length;
     uint32_t xid;
-    uint16_t flags;
+    enum ofp_config_flags flags;
     uint16_t miss_send_len;
 };
 
-struct ofp_table_mod {
+struct of_table_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     uint32_t config;
 };
 
-struct ofp_port_desc {
+struct of_port_desc {
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
     of_mac_addr_t hw_addr;
-    uint8_t[2] pad2;
+    pad(2);
     of_port_name_t name;
-    uint32_t config;
-    uint32_t state;
-    uint32_t curr;
-    uint32_t advertised;
-    uint32_t supported;
-    uint32_t peer;
+    enum ofp_port_config config;
+    enum ofp_port_state state;
+    enum ofp_port_features curr;
+    enum ofp_port_features advertised;
+    enum ofp_port_features supported;
+    enum ofp_port_features peer;
     uint32_t curr_speed;
     uint32_t max_speed;
 };
 
-struct ofp_features_request {
+struct of_features_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 5;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_features_reply {
+struct of_features_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 6;
     uint16_t length;
     uint32_t xid;
     uint64_t datapath_id;
     uint32_t n_buffers;
     uint8_t n_tables;
-    uint8_t[3] pad;
-    uint32_t capabilities;
+    pad(3);
+    enum ofp_capabilities capabilities;
     uint32_t reserved;
     list(of_port_desc_t) ports;
 };
 
-struct ofp_port_status {
+struct of_port_status : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 12;
     uint16_t length;
     uint32_t xid;
-    uint8_t reason;
-    uint8_t[7] pad;
+    enum ofp_port_reason reason;
+    pad(7);
     of_port_desc_t desc;
 };
 
-struct ofp_port_mod {
+struct of_port_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
     of_mac_addr_t hw_addr;
-    uint8_t[2] pad2;
+    pad(2);
     uint32_t config;
     uint32_t mask;
     uint32_t advertise;
-    uint8_t[4] pad3;
+    pad(4);
 };
 
-struct ofp_packet_in {
+struct of_packet_in : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 10;
     uint16_t length;
     uint32_t xid;
     uint32_t buffer_id;
@@ -572,200 +576,200 @@
     of_octets_t data;
 };
 
-struct ofp_action_output {
-    uint16_t type;
+struct of_action_output : of_action {
+    uint16_t type == 0;
     uint16_t len;
     of_port_no_t port;
     uint16_t max_len;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_action_set_vlan_vid {
-    uint16_t type;
+struct of_action_set_vlan_vid : of_action {
+    uint16_t type == 1;
     uint16_t len;
     uint16_t vlan_vid;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_set_vlan_pcp {
-    uint16_t type;
+struct of_action_set_vlan_pcp : of_action {
+    uint16_t type == 2;
     uint16_t len;
     uint8_t vlan_pcp;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_set_dl_src {
-    uint16_t type;
+struct of_action_set_dl_src : of_action {
+    uint16_t type == 3;
     uint16_t len;
     of_mac_addr_t dl_addr;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_action_set_dl_dst {
-    uint16_t type;
+struct of_action_set_dl_dst : of_action {
+    uint16_t type == 4;
     uint16_t len;
     of_mac_addr_t dl_addr;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_action_set_nw_src {
-    uint16_t type;
+struct of_action_set_nw_src : of_action {
+    uint16_t type == 5;
     uint16_t len;
     uint32_t nw_addr;
 };
 
-struct ofp_action_set_nw_dst {
-    uint16_t type;
+struct of_action_set_nw_dst : of_action {
+    uint16_t type == 6;
     uint16_t len;
     uint32_t nw_addr;
 };
 
-struct ofp_action_set_nw_tos {
-    uint16_t type;
+struct of_action_set_nw_tos : of_action {
+    uint16_t type == 7;
     uint16_t len;
     uint8_t nw_tos;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_set_nw_ecn {
-    uint16_t type;
+struct of_action_set_nw_ecn : of_action {
+    uint16_t type == 8;
     uint16_t len;
     uint8_t nw_ecn;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_set_tp_src {
-    uint16_t type;
+struct of_action_set_tp_src : of_action {
+    uint16_t type == 9;
     uint16_t len;
     uint16_t tp_port;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_set_tp_dst {
-    uint16_t type;
+struct of_action_set_tp_dst : of_action {
+    uint16_t type == 10;
     uint16_t len;
     uint16_t tp_port;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_copy_ttl_out {
-    uint16_t type;
+struct of_action_copy_ttl_out : of_action {
+    uint16_t type == 11;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_copy_ttl_in {
-    uint16_t type;
+struct of_action_copy_ttl_in : of_action {
+    uint16_t type == 12;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_set_mpls_label {
-    uint16_t type;
+struct of_action_set_mpls_label : of_action {
+    uint16_t type == 13;
     uint16_t len;
     uint32_t mpls_label;
 };
 
-struct ofp_action_set_mpls_tc {
-    uint16_t type;
+struct of_action_set_mpls_tc : of_action {
+    uint16_t type == 14;
     uint16_t len;
     uint8_t mpls_tc;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_set_mpls_ttl {
-    uint16_t type;
+struct of_action_set_mpls_ttl : of_action {
+    uint16_t type == 15;
     uint16_t len;
     uint8_t mpls_ttl;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_dec_mpls_ttl {
-    uint16_t type;
+struct of_action_dec_mpls_ttl : of_action {
+    uint16_t type == 16;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_push_vlan {
-    uint16_t type;
+struct of_action_push_vlan : of_action {
+    uint16_t type == 17;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_pop_vlan {
-    uint16_t type;
+struct of_action_pop_vlan : of_action {
+    uint16_t type == 18;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_push_mpls {
-    uint16_t type;
+struct of_action_push_mpls : of_action {
+    uint16_t type == 19;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_pop_mpls {
-    uint16_t type;
+struct of_action_pop_mpls : of_action {
+    uint16_t type == 20;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_set_queue {
-    uint16_t type;
+struct of_action_set_queue : of_action {
+    uint16_t type == 21;
     uint16_t len;
     uint32_t queue_id;
 };
 
-struct ofp_action_group {
-    uint16_t type;
+struct of_action_group : of_action {
+    uint16_t type == 22;
     uint16_t len;
     uint32_t group_id;
 };
 
-struct ofp_action_set_nw_ttl {
-    uint16_t type;
+struct of_action_set_nw_ttl : of_action {
+    uint16_t type == 23;
     uint16_t len;
     uint8_t nw_ttl;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_dec_nw_ttl {
-    uint16_t type;
+struct of_action_dec_nw_ttl : of_action {
+    uint16_t type == 24;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_experimenter {
-    uint16_t type;
+struct of_action_experimenter : of_action {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     of_octets_t data;
 };
 
-struct ofp_action {
-    uint16_t type;
+struct of_action {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
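
The `of_action` base above keeps `type == ?` while each concrete action fixes the value (output == 0, push_vlan == 17, experimenter == 65535, mirroring ofp_action_type), so an action list is a simple type/length-prefixed TLV sequence. A minimal, hypothetical walker over such a list:

import struct

def split_actions(buf):
    """Yield (type, raw_bytes) for each TLV-style action in an action list."""
    off = 0
    while off < len(buf):
        atype, alen = struct.unpack_from("!HH", buf, off)
        yield atype, buf[off:off + alen]
        off += alen

# An of_action_output: type=0, len=16, port=OFPP_CONTROLLER, max_len, pad(6).
output = struct.pack("!HHIH6x", 0, 16, 0xfffffffd, 0xffff)
assert [atype for atype, _ in split_actions(output)] == [0]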
 
-struct ofp_packet_out {
+struct of_packet_out : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 13;
     uint16_t length;
     uint32_t xid;
     uint32_t buffer_id;
     of_port_no_t in_port;
     uint16_t actions_len;
-    uint8_t[6] pad;
+    pad(6);
     list(of_action_t) actions;
     of_octets_t data;
 };
 
-struct ofp_match_v2 {
-    uint16_t type;
+struct of_match_v2 {
+    uint16_t type == 0;
     uint16_t length;
     of_port_no_t in_port;
     of_wc_bmap_t wildcards;
@@ -775,200 +779,257 @@
     of_mac_addr_t eth_dst_mask;
     uint16_t vlan_vid;
     uint8_t vlan_pcp;
-    uint8_t[1] pad1;
+    pad(1);
     uint16_t eth_type;
     uint8_t ip_dscp;
     uint8_t ip_proto;
-    uint32_t ipv4_src;
-    uint32_t ipv4_src_mask;
-    uint32_t ipv4_dst;
-    uint32_t ipv4_dst_mask;
+    of_ipv4_t ipv4_src;
+    of_ipv4_t ipv4_src_mask;
+    of_ipv4_t ipv4_dst;
+    of_ipv4_t ipv4_dst_mask;
     uint16_t tcp_src;
     uint16_t tcp_dst;
     uint32_t mpls_label;
     uint8_t mpls_tc;
-    uint8_t[3] pad2;
+    pad(3);
     uint64_t metadata;
     uint64_t metadata_mask;
 };
 
-struct ofp_instruction {
-    uint16_t type;
+struct of_instruction {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_instruction_goto_table {
-    uint16_t type;
+struct of_instruction_goto_table : of_instruction {
+    uint16_t type == 1;
     uint16_t len;
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_instruction_write_metadata {
-    uint16_t type;
+struct of_instruction_write_metadata : of_instruction {
+    uint16_t type == 2;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     uint64_t metadata;
     uint64_t metadata_mask;
 };
 
-struct ofp_instruction_write_actions {
-    uint16_t type;
+struct of_instruction_write_actions : of_instruction {
+    uint16_t type == 3;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     list(of_action_t) actions;
 };
 
-struct ofp_instruction_apply_actions {
-    uint16_t type;
+struct of_instruction_apply_actions : of_instruction {
+    uint16_t type == 4;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     list(of_action_t) actions;
 };
 
-struct ofp_instruction_clear_actions {
-    uint16_t type;
+struct of_instruction_clear_actions : of_instruction {
+    uint16_t type == 5;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_instruction_experimenter {
-    uint16_t type;		
+struct of_instruction_experimenter : of_instruction {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     of_octets_t data;
 };
 
-struct ofp_flow_add {
+struct of_flow_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == ?;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_modify {
+struct of_flow_add : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 0;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_modify_strict {
+struct of_flow_modify : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 1;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_delete {
+struct of_flow_modify_strict : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 2;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_delete_strict {
+struct of_flow_delete : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 3;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_bucket {
+struct of_flow_delete_strict : of_flow_mod {
+    uint8_t version;
+    uint8_t type == 14;
+    uint16_t length;
+    uint32_t xid;
+    uint64_t cookie;
+    uint64_t cookie_mask;
+    uint8_t table_id;
+    of_fm_cmd_t _command == 4;
+    uint16_t idle_timeout;
+    uint16_t hard_timeout;
+    uint16_t priority;
+    uint32_t buffer_id;
+    of_port_no_t out_port;
+    uint32_t out_group;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
+    of_match_t match;
+    list(of_instruction_t) instructions;
+};
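
All of the flow_mod variants above share `type == 14` and differ only in the `_command` value (0 through 4, matching ofp_flow_mod_command), so a decoder has to branch on a second discriminator after the message type. A hypothetical dispatch table for that step (names only; the generated code derives the real mapping from the `== N` markers):

FLOW_MOD_BY_COMMAND = {
    0: "of_flow_add",
    1: "of_flow_modify",
    2: "of_flow_modify_strict",
    3: "of_flow_delete",
    4: "of_flow_delete_strict",
}

def flow_mod_class_name(command):
    """Map the _command discriminator to a subclass, falling back to the base."""
    return FLOW_MOD_BY_COMMAND.get(command, "of_flow_mod")

assert flow_mod_class_name(4) == "of_flow_delete_strict"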
+
+struct of_bucket {
     uint16_t len;
     uint16_t weight;
     of_port_no_t watch_port;
     uint32_t watch_group;
-    uint8_t[4] pad;
+    pad(4);
     list(of_action_t) actions;
 };
 
-struct ofp_group_mod {
+struct of_group_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 15;
     uint16_t length;
     uint32_t xid;
-    uint16_t command;
-    uint8_t group_type;
-    uint8_t pad;
+    enum ofp_group_mod_command command == ?;
+    enum ofp_group_type group_type;
+    pad(1);
     uint32_t group_id;
     list(of_bucket_t) buckets;
 };
 
-struct ofp_flow_removed {
+struct of_group_add : of_group_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 15;
+    uint16_t length;
+    uint32_t xid;
+    enum ofp_group_mod_command command == 0;
+    enum ofp_group_type group_type;
+    pad(1);
+    uint32_t group_id;
+    list(of_bucket_t) buckets;
+};
+
+struct of_group_modify : of_group_mod {
+    uint8_t version;
+    uint8_t type == 15;
+    uint16_t length;
+    uint32_t xid;
+    enum ofp_group_mod_command command == 1;
+    enum ofp_group_type group_type;
+    pad(1);
+    uint32_t group_id;
+    list(of_bucket_t) buckets;
+};
+
+struct of_group_delete : of_group_mod {
+    uint8_t version;
+    uint8_t type == 15;
+    uint16_t length;
+    uint32_t xid;
+    enum ofp_group_mod_command command == 2;
+    enum ofp_group_type group_type;
+    pad(1);
+    uint32_t group_id;
+    list(of_bucket_t) buckets;
+};
+
+struct of_flow_removed : of_header {
+    uint8_t version;
+    uint8_t type == 11;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
@@ -978,34 +1039,142 @@
     uint32_t duration_sec;
     uint32_t duration_nsec;
     uint16_t idle_timeout;
-    uint8_t[2] pad2;
+    pad(2);
     uint64_t packet_count;
     uint64_t byte_count;
     of_match_t match;
 };
 
-struct ofp_error_msg {
+struct of_error_msg : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 1;
     uint16_t length;
     uint32_t xid;
-    uint16_t err_type;
-    uint16_t code;
+    uint16_t err_type == ?;
+};
+
+struct of_hello_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 0;
+    enum ofp_hello_failed_code code;
+    of_octets_t data;
+};
+
+struct of_bad_request_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 1;
+    enum ofp_bad_request_code code;
+    of_octets_t data;
+};
+
+struct of_bad_action_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 2;
+    enum ofp_bad_action_code code;
+    of_octets_t data;
+};
+
+struct of_bad_instruction_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 3;
+    enum ofp_bad_instruction_code code;
+    of_octets_t data;
+};
+
+struct of_bad_match_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 4;
+    enum ofp_bad_match_code code;
+    of_octets_t data;
+};
+
+struct of_flow_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 5;
+    enum ofp_flow_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_group_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 6;
+    enum ofp_group_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_port_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 7;
+    enum ofp_port_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_table_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 8;
+    enum ofp_table_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_queue_op_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 9;
+    enum ofp_queue_op_failed_code code;
+    of_octets_t data;
+};
+
+struct of_switch_config_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 10;
+    enum ofp_switch_config_failed_code code;
     of_octets_t data;
 };
 
 // STATS ENTRIES:  flow, table, port, group, group_desc
 
-struct ofp_flow_stats_entry {
+struct of_flow_stats_entry {
     uint16_t length;
     uint8_t table_id;
-    uint8_t pad;
+    pad(1);
     uint32_t duration_sec;
     uint32_t duration_nsec;
     uint16_t priority;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
-    uint8_t[6] pad2;
+    pad(6);
     uint64_t cookie;
     uint64_t packet_count;
     uint64_t byte_count;
@@ -1013,9 +1182,9 @@
     list(of_instruction_t) instructions;
 };
 
-struct ofp_table_stats_entry {
+struct of_table_stats_entry {
     uint8_t table_id;
-    uint8_t[7] pad;
+    pad(7);
     of_table_name_t name;
     of_wc_bmap_t wildcards;
     of_match_bmap_t match;
@@ -1029,9 +1198,9 @@
     uint64_t matched_count;
 };
 
-struct ofp_port_stats_entry {
+struct of_port_stats_entry {
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
     uint64_t rx_packets;
     uint64_t tx_packets;
     uint64_t rx_bytes;
@@ -1046,7 +1215,7 @@
     uint64_t collisions;
 };
 
-struct ofp_queue_stats_entry {
+struct of_queue_stats_entry {
     of_port_no_t port_no;
     uint32_t queue_id;
     uint64_t tx_bytes;
@@ -1054,50 +1223,70 @@
     uint64_t tx_errors;
 };
 
-struct ofp_bucket_counter {
+struct of_bucket_counter {
     uint64_t packet_count;
     uint64_t byte_count;
 };
 
-struct ofp_group_stats_entry {
+struct of_group_stats_entry {
     uint16_t length;
-    uint8_t[2] pad;
+    pad(2);
     uint32_t group_id;
     uint32_t ref_count;
-    uint8_t[4] pad2;
+    pad(4);
     uint64_t packet_count;
     uint64_t byte_count;
     list(of_bucket_counter_t) bucket_stats;
 };
 
-struct ofp_group_desc_stats_entry {
+struct of_group_desc_stats_entry {
     uint16_t length;
-    uint8_t type;
-    uint8_t pad;
+    enum ofp_group_type group_type;
+    pad(1);
     uint32_t group_id;
     list(of_bucket_t) buckets;
 };
 
 // STATS:  Desc, flow, agg, table, port, queue, group, group_desc, experi
 
-struct ofp_desc_stats_request {
+struct of_stats_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == ?;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_desc_stats_reply {
+struct of_stats_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == ?;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+};
+
+struct of_desc_stats_request : of_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+};
+
+struct of_desc_stats_reply : of_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     of_desc_str_t mfr_desc;
     of_desc_str_t hw_desc;
     of_desc_str_t sw_desc;
@@ -1105,242 +1294,242 @@
     of_desc_str_t dp_desc;
 };
 
-struct ofp_flow_stats_request {
+struct of_flow_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 1;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     of_port_no_t out_port;
     uint32_t out_group;
-    uint8_t[4] pad2;
+    pad(4);
     uint64_t cookie;
     uint64_t cookie_mask;
     of_match_t match;
 };
 
-struct ofp_flow_stats_reply {
+struct of_flow_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 1;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_flow_stats_entry_t) entries;
 };
 
-struct ofp_aggregate_stats_request {
+struct of_aggregate_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 2;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     of_port_no_t out_port;
     uint32_t out_group;
-    uint8_t[4] pad2;
+    pad(4);
     uint64_t cookie;
     uint64_t cookie_mask;
     of_match_t match;
 };
 
-struct ofp_aggregate_stats_reply {
+struct of_aggregate_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 2;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     uint64_t packet_count;
     uint64_t byte_count;
     uint32_t flow_count;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_table_stats_request {
+struct of_table_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 3;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_table_stats_reply {
+struct of_table_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 3;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_table_stats_entry_t) entries;
 };
 
-struct ofp_port_stats_request {
+struct of_port_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 4;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_port_stats_reply {
+struct of_port_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 4;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_port_stats_entry_t) entries;
 };
 
-struct ofp_queue_stats_request {
+struct of_queue_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 5;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     of_port_no_t port_no;
     uint32_t queue_id;
 };
 
-struct ofp_queue_stats_reply {
+struct of_queue_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 5;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_queue_stats_entry_t) entries;
 };
 
-struct ofp_group_stats_request {
+struct of_group_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 6;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint32_t group_id;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_group_stats_reply {
+struct of_group_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 6;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_group_stats_entry_t) entries;
 };
 
-struct ofp_group_desc_stats_request {
+struct of_group_desc_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 7;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_group_desc_stats_reply {
+struct of_group_desc_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 7;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_group_desc_stats_entry_t) entries;
 };
 
-struct ofp_experimenter_stats_request {
+struct of_experimenter_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
-    uint32_t experimenter;
-    uint8_t[4] pad;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == ?;
+    pad(4);
     of_octets_t data;
 };
 
-struct ofp_experimenter_stats_reply {
+struct of_experimenter_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
-    uint32_t experimenter;
-    uint8_t[4] pad;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == ?;
+    pad(4);
     of_octets_t data;
 };
 
 // END OF STATS OBJECTS
 
-struct ofp_queue_prop {
-    uint16_t type;
+struct of_queue_prop {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_queue_prop_min_rate {
-    uint16_t type;
+struct of_queue_prop_min_rate : of_queue_prop {
+    uint16_t type == 1;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     uint16_t rate;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_packet_queue {
+struct of_packet_queue {
     uint32_t queue_id;
     uint16_t len;
-    uint8_t[2] pad;
+    pad(2);
     list(of_queue_prop_t) properties;
 };
 
-struct ofp_queue_get_config_request {
+struct of_queue_get_config_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 22;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_queue_get_config_reply {
+struct of_queue_get_config_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 23;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port;
-    uint8_t[4] pad;
+    pad(4);
     list(of_packet_queue_t) queues;
 };
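
Both input files also switch OFP_TCP_PORT and OFP_SSL_PORT from the legacy 6633 to 6653, the IANA-assigned OpenFlow control port. A trivial sketch of a listener bound to the new default (plain Python sockets, unrelated to loxigen itself):

import socket

OFP_TCP_PORT = 6653  # IANA-assigned; 6633 was the historical de-facto port

def open_controller_socket(host="0.0.0.0", port=OFP_TCP_PORT):
    """Bind and listen on the OpenFlow control port."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    sock.listen(5)
    return sock
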
diff --git a/openflow_input/standard-1.2 b/openflow_input/standard-1.2
index 0d48c43..c1e8a2b 100644
--- a/openflow_input/standard-1.2
+++ b/openflow_input/standard-1.2
@@ -35,8 +35,8 @@
     OFP_MAX_TABLE_NAME_LEN = 32,
     OFP_MAX_PORT_NAME_LEN = 16,
 
-    OFP_TCP_PORT = 6633,
-    OFP_SSL_PORT = 6633,
+    OFP_TCP_PORT = 6653,
+    OFP_SSL_PORT = 6653,
 
     OFP_ETH_ALEN = 6,
 
@@ -59,7 +59,7 @@
     OFPQ_MAX_RATE_UNCFG = 0xffff,
 };
 
-enum ofp_port {
+enum ofp_port(wire_type=uint32_t) {
     OFPP_MAX = 0xffffff00,
     OFPP_IN_PORT = 0xfffffff8,
     OFPP_TABLE = 0xfffffff9,
@@ -68,13 +68,10 @@
     OFPP_ALL = 0xfffffffc,
     OFPP_CONTROLLER = 0xfffffffd,
     OFPP_LOCAL = 0xfffffffe,
-};
-
-enum ofp_port_no {
     OFPP_ANY = 0xffffffff,
 };
 
-enum ofp_type {
+enum ofp_type(wire_type=uint8_t) {
     OFPT_HELLO = 0,
     OFPT_ERROR = 1,
     OFPT_ECHO_REQUEST = 2,
@@ -103,7 +100,7 @@
     OFPT_ROLE_REPLY = 25,
 };
 
-enum ofp_config_flags {
+enum ofp_config_flags(wire_type=uint16_t, bitmask=True) {
     OFPC_FRAG_NORMAL = 0,
     OFPC_FRAG_DROP = 1,
     OFPC_FRAG_REASM = 2,
@@ -111,19 +108,19 @@
     OFPC_INVALID_TTL_TO_CONTROLLER = 4,
 };
 
-enum ofp_table_config {
+enum ofp_table_config(wire_type=uint32_t, bitmask=True) {
     OFPTC_TABLE_MISS_CONTROLLER = 0,
     OFPTC_TABLE_MISS_CONTINUE = 1,
     OFPTC_TABLE_MISS_DROP = 2,
     OFPTC_TABLE_MISS_MASK = 3,
 };
 
-enum ofp_table {
+enum ofp_table(wire_type=uint8_t, complete=False) {
     OFPTT_MAX = 0xfe,
     OFPTT_ALL = 0xff,
 };
 
-enum ofp_capabilities {
+enum ofp_capabilities(wire_type=uint32_t, bitmask=True) {
     OFPC_FLOW_STATS = 0x1,
     OFPC_TABLE_STATS = 0x2,
     OFPC_PORT_STATS = 0x4,
@@ -133,20 +130,21 @@
     OFPC_PORT_BLOCKED = 0x100,
 };
 
-enum ofp_port_config {
+enum ofp_port_config(wire_type=uint32_t, bitmask=True) {
     OFPPC_PORT_DOWN = 0x1,
     OFPPC_NO_RECV = 0x4,
     OFPPC_NO_FWD = 0x20,
     OFPPC_NO_PACKET_IN = 0x40,
+    OFPPC_BSN_MIRROR_DEST = 0x80000000,
 };
 
-enum ofp_port_state {
+enum ofp_port_state(wire_type=uint32_t, bitmask=True) {
     OFPPS_LINK_DOWN = 0x1,
     OFPPS_BLOCKED = 0x2,
     OFPPS_LIVE = 0x4,
 };
 
-enum ofp_port_features {
+enum ofp_port_features(wire_type=uint32_t, bitmask=True) {
     OFPPF_10MB_HD = 0x1,
     OFPPF_10MB_FD = 0x2,
     OFPPF_100MB_HD = 0x4,
@@ -165,30 +163,30 @@
     OFPPF_PAUSE_ASYM = 0x8000,
 };
 
-enum ofp_port_reason {
+enum ofp_port_reason(wire_type=uint8_t) {
     OFPPR_ADD = 0,
     OFPPR_DELETE = 1,
     OFPPR_MODIFY = 2,
 };
 
-enum ofp_match_type {
+enum ofp_match_type(wire_type=uint16_t) {
     OFPMT_STANDARD = 0,
     OFPMT_OXM = 1,
 };
 
-enum ofp_oxm_class {
+enum ofp_oxm_class(wire_type=uint16_t) {
     OFPXMC_NXM_0 = 0,
     OFPXMC_NXM_1 = 1,
     OFPXMC_OPENFLOW_BASIC = 0x8000,
     OFPXMC_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_vlan_id {
+enum ofp_vlan_id(wire_type=uint16_t) {
     OFPVID_NONE = 0,
     OFPVID_PRESENT = 0x1000,
 };
 
-enum ofp_action_type {
+enum ofp_action_type(wire_type=uint16_t) {
     OFPAT_OUTPUT = 0,
     OFPAT_COPY_TTL_OUT = 0xb,
     OFPAT_COPY_TTL_IN = 0xc,
@@ -206,12 +204,12 @@
     OFPAT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_controller_max_len {
+enum ofp_controller_max_len(wire_type=uint16_t, complete=False) {
     OFPCML_MAX = 0xffe5,
     OFPCML_NO_BUFFER = 0xffff,
 };
 
-enum ofp_instruction_type {
+enum ofp_instruction_type(wire_type=uint16_t, bitmask=True) {
     OFPIT_GOTO_TABLE = 0x1,
     OFPIT_WRITE_METADATA = 0x2,
     OFPIT_WRITE_ACTIONS = 0x3,
@@ -220,7 +218,7 @@
     OFPIT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_flow_mod_command {
+enum ofp_flow_mod_command(wire_type=uint8_t) {
     OFPFC_ADD = 0,
     OFPFC_MODIFY = 1,
     OFPFC_MODIFY_STRICT = 2,
@@ -228,45 +226,45 @@
     OFPFC_DELETE_STRICT = 4,
 };
 
-enum ofp_flow_mod_flags {
+enum ofp_flow_mod_flags(wire_type=uint16_t, bitmask=True) {
     OFPFF_SEND_FLOW_REM = 0x1,
     OFPFF_CHECK_OVERLAP = 0x2,
     OFPFF_RESET_COUNTS = 0x4,
 };
 
-enum ofp_group {
+enum ofp_group(wire_type=uint32_t, complete=False) {
     OFPG_MAX = 0xffffff00,
     OFPG_ALL = 0xfffffffc,
     OFPG_ANY = 0xffffffff,
 };
 
-enum ofp_group_mod_command {
+enum ofp_group_mod_command(wire_type=uint16_t) {
     OFPGC_ADD = 0,
     OFPGC_MODIFY = 1,
     OFPGC_DELETE = 2,
 };
 
-enum ofp_group_type {
+enum ofp_group_type(wire_type=uint8_t) {
     OFPGT_ALL = 0,
     OFPGT_SELECT = 1,
     OFPGT_INDIRECT = 2,
     OFPGT_FF = 3,
 };
 
-enum ofp_packet_in_reason {
+enum ofp_packet_in_reason(wire_type=uint8_t) {
     OFPR_NO_MATCH = 0,
     OFPR_ACTION = 1,
     OFPR_INVALID_TTL = 2,
 };
 
-enum ofp_flow_removed_reason {
+enum ofp_flow_removed_reason(wire_type=uint8_t) {
     OFPRR_IDLE_TIMEOUT = 0,
     OFPRR_HARD_TIMEOUT = 1,
     OFPRR_DELETE = 2,
     OFPRR_GROUP_DELETE = 3,
 };
 
-enum ofp_error_type {
+enum ofp_error_type(wire_type=uint16_t) {
     OFPET_HELLO_FAILED = 0,
     OFPET_BAD_REQUEST = 1,
     OFPET_BAD_ACTION = 2,
@@ -282,17 +280,17 @@
     OFPET_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_hello_failed_code {
+enum ofp_hello_failed_code(wire_type=uint16_t) {
     OFPHFC_INCOMPATIBLE = 0,
     OFPHFC_EPERM = 1,
 };
 
-enum ofp_bad_request_code {
+enum ofp_bad_request_code(wire_type=uint16_t) {
     OFPBRC_BAD_VERSION = 0,
     OFPBRC_BAD_TYPE = 1,
     OFPBRC_BAD_STAT = 2,
     OFPBRC_BAD_EXPERIMENTER = 3,
-    OFPBRC_BAD_EXP_TYPE = 4,
+    OFPBRC_BAD_EXPERIMENTER_TYPE = 4,
     OFPBRC_EPERM = 5,
     OFPBRC_BAD_LEN = 6,
     OFPBRC_BUFFER_EMPTY = 7,
@@ -303,11 +301,11 @@
     OFPBRC_BAD_PACKET = 12,
 };
 
-enum ofp_bad_action_code {
+enum ofp_bad_action_code(wire_type=uint16_t) {
     OFPBAC_BAD_TYPE = 0,
     OFPBAC_BAD_LEN = 1,
     OFPBAC_BAD_EXPERIMENTER = 2,
-    OFPBAC_BAD_EXP_TYPE = 3,
+    OFPBAC_BAD_EXPERIMENTER_TYPE = 3,
     OFPBAC_BAD_OUT_PORT = 4,
     OFPBAC_BAD_ARGUMENT = 5,
     OFPBAC_EPERM = 6,
@@ -322,19 +320,19 @@
     OFPBAC_BAD_SET_ARGUMENT = 15,
 };
 
-enum ofp_bad_instruction_code {
+enum ofp_bad_instruction_code(wire_type=uint16_t) {
     OFPBIC_UNKNOWN_INST = 0,
     OFPBIC_UNSUP_INST = 1,
     OFPBIC_BAD_TABLE_ID = 2,
     OFPBIC_UNSUP_METADATA = 3,
     OFPBIC_UNSUP_METADATA_MASK = 4,
     OFPBIC_BAD_EXPERIMENTER = 5,
-    OFPBIC_BAD_EXP_TYPE = 6,
+    OFPBIC_BAD_EXPERIMENTER_TYPE = 6,
     OFPBIC_BAD_LEN = 7,
     OFPBIC_EPERM = 8,
 };
 
-enum ofp_bad_match_code {
+enum ofp_bad_match_code(wire_type=uint16_t) {
     OFPBMC_BAD_TYPE = 0,
     OFPBMC_BAD_LEN = 1,
     OFPBMC_BAD_TAG = 2,
@@ -349,7 +347,7 @@
     OFPBMC_EPERM = 11,
 };
 
-enum ofp_flow_mod_failed_code {
+enum ofp_flow_mod_failed_code(wire_type=uint16_t) {
     OFPFMFC_UNKNOWN = 0,
     OFPFMFC_TABLE_FULL = 1,
     OFPFMFC_BAD_TABLE_ID = 2,
@@ -360,7 +358,7 @@
     OFPFMFC_BAD_FLAGS = 7,
 };
 
-enum ofp_group_mod_failed_code {
+enum ofp_group_mod_failed_code(wire_type=uint16_t) {
     OFPGMFC_GROUP_EXISTS = 0,
     OFPGMFC_INVALID_GROUP = 1,
     OFPGMFC_WEIGHT_UNSUPPORTED = 2,
@@ -378,7 +376,7 @@
     OFPGMFC_EPERM = 14,
 };
 
-enum ofp_port_mod_failed_code {
+enum ofp_port_mod_failed_code(wire_type=uint16_t) {
     OFPPMFC_BAD_PORT = 0,
     OFPPMFC_BAD_HW_ADDR = 1,
     OFPPMFC_BAD_CONFIG = 2,
@@ -386,31 +384,31 @@
     OFPPMFC_EPERM = 4,
 };
 
-enum ofp_table_mod_failed_code {
+enum ofp_table_mod_failed_code(wire_type=uint16_t) {
     OFPTMFC_BAD_TABLE = 0,
     OFPTMFC_BAD_CONFIG = 1,
     OFPTMFC_EPERM = 2,
 };
 
-enum ofp_queue_op_failed_code {
+enum ofp_queue_op_failed_code(wire_type=uint16_t) {
     OFPQOFC_BAD_PORT = 0,
     OFPQOFC_BAD_QUEUE = 1,
     OFPQOFC_EPERM = 2,
 };
 
-enum ofp_switch_config_failed_code {
+enum ofp_switch_config_failed_code(wire_type=uint16_t) {
     OFPSCFC_BAD_FLAGS = 0,
     OFPSCFC_BAD_LEN = 1,
     OFPSCFC_EPERM = 2,
 };
 
-enum ofp_role_request_failed_code {
+enum ofp_role_request_failed_code(wire_type=uint16_t) {
     OFPRRFC_STALE = 0,
     OFPRRFC_UNSUP = 1,
     OFPRRFC_BAD_ROLE = 2,
 };
 
-enum ofp_stats_types {
+enum ofp_stats_type(wire_type=uint16_t) {
     OFPST_DESC = 0,
     OFPST_FLOW = 1,
     OFPST_AGGREGATE = 2,
@@ -423,487 +421,542 @@
     OFPST_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_stats_reply_flags {
+enum ofp_stats_request_flags(wire_type=uint16_t, bitmask=True) {
+};
+
+enum ofp_stats_reply_flags(wire_type=uint16_t, bitmask=True) {
     OFPSF_REPLY_MORE = 0x1,
 };
 
-enum ofp_group_capabilities {
+enum ofp_group_capabilities(wire_type=uint32_t, bitmask=True) {
     OFPGFC_SELECT_WEIGHT = 0x1,
     OFPGFC_SELECT_LIVENESS = 0x2,
     OFPGFC_CHAINING = 0x4,
     OFPGFC_CHAINING_CHECKS = 0x8,
 };
 
-enum ofp_queue_properties {
+enum ofp_queue_properties(wire_type=uint16_t) {
     OFPQT_MIN_RATE = 0x1,
     OFPQT_MAX_RATE = 0x2,
     OFPQT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_controller_role {
+enum ofp_controller_role(wire_type=uint32_t) {
     OFPCR_ROLE_NOCHANGE = 0,
     OFPCR_ROLE_EQUAL = 1,
     OFPCR_ROLE_MASTER = 2,
     OFPCR_ROLE_SLAVE = 3,
 };
 
-struct ofp_header {
+/* XXX rename to of_message */
+struct of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == ?;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_hello {
+struct of_hello : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 0;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_echo_request {
+struct of_echo_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 2;
     uint16_t length;
     uint32_t xid;
     of_octets_t data;
 };
 
-struct ofp_echo_reply {
+struct of_echo_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 3;
     uint16_t length;
     uint32_t xid;
     of_octets_t data;
 };
 
-struct ofp_experimenter {
+struct of_experimenter : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     uint32_t subtype;
     of_octets_t data;
 };
 
-struct ofp_barrier_request {
+struct of_barrier_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 20;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_barrier_reply {
+struct of_barrier_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 21;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_get_config_request {
+struct of_get_config_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 7;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_get_config_reply {
+struct of_get_config_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 8;
     uint16_t length;
     uint32_t xid;
-    uint16_t flags;
+    enum ofp_config_flags flags;
     uint16_t miss_send_len;
 };
 
-struct ofp_set_config {
+struct of_set_config : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 9;
     uint16_t length;
     uint32_t xid;
-    uint16_t flags;
+    enum ofp_config_flags flags;
     uint16_t miss_send_len;
 };
 
-struct ofp_table_mod {
+struct of_table_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     uint32_t config;
 };
 
-struct ofp_port_desc {
+struct of_port_desc {
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
     of_mac_addr_t hw_addr;
-    uint8_t[2] pad2;
+    pad(2);
     of_port_name_t name;
-    uint32_t config;
-    uint32_t state;
-    uint32_t curr;
-    uint32_t advertised;
-    uint32_t supported;
-    uint32_t peer;
+    enum ofp_port_config config;
+    enum ofp_port_state state;
+    enum ofp_port_features curr;
+    enum ofp_port_features advertised;
+    enum ofp_port_features supported;
+    enum ofp_port_features peer;
     uint32_t curr_speed;
     uint32_t max_speed;
 };
 
-struct ofp_features_request {
+struct of_features_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 5;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_features_reply {
+struct of_features_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 6;
     uint16_t length;
     uint32_t xid;
     uint64_t datapath_id;
     uint32_t n_buffers;
     uint8_t n_tables;
-    uint8_t[3] pad;
-    uint32_t capabilities;
+    pad(3);
+    enum ofp_capabilities capabilities;
     uint32_t reserved;
     list(of_port_desc_t) ports;
 };
 
-struct ofp_port_status {
+struct of_port_status : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 12;
     uint16_t length;
     uint32_t xid;
-    uint8_t reason;
-    uint8_t[7] pad;
+    enum ofp_port_reason reason;
+    pad(7);
     of_port_desc_t desc;
 };
 
-struct ofp_port_mod {
+struct of_port_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
     of_mac_addr_t hw_addr;
-    uint8_t[2] pad2;
+    pad(2);
     uint32_t config;
     uint32_t mask;
     uint32_t advertise;
-    uint8_t[4] pad3;
+    pad(4);
 };
 
-struct ofp_match_v3 {
-    uint16_t type;
+struct of_match_v3(align=8, length_includes_align=False) {
+    uint16_t type == 1;
     uint16_t length;
     list(of_oxm_t) oxm_list;
 };
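
The `align=8, length_includes_align=False` annotation on of_match_v3 reflects the OXM match layout in 1.2: the length field counts the 4-byte header plus the OXM TLVs, and the structure is then zero-padded out to the next 8-byte boundary, with that padding excluded from the length. A small arithmetic sketch of the rule (my reading of the annotation, not generated code):

def match_wire_size(length):
    """Bytes an of_match_v3 occupies once padded to an 8-byte boundary;
    `length` is the value of its length field (padding not included)."""
    return (length + 7) & ~7

assert match_wire_size(4) == 8    # empty oxm_list: 4-byte header + 4 pad bytes
assert match_wire_size(14) == 16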
 
-struct ofp_oxm_experimenter_header {
-    uint32_t oxm_header;
-    uint32_t experimenter;
-    of_octets_t data;
-};
-
-struct ofp_action_output {
-    uint16_t type;
+struct of_action_output : of_action {
+    uint16_t type == 0;
     uint16_t len;
     of_port_no_t port;
     uint16_t max_len;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_action_copy_ttl_out {
-    uint16_t type;
+struct of_action_copy_ttl_out : of_action {
+    uint16_t type == 11;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_copy_ttl_in {
-    uint16_t type;
+struct of_action_copy_ttl_in : of_action {
+    uint16_t type == 12;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_set_mpls_ttl {
-    uint16_t type;
+struct of_action_set_mpls_ttl : of_action {
+    uint16_t type == 15;
     uint16_t len;
     uint8_t mpls_ttl;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_dec_mpls_ttl {
-    uint16_t type;
+struct of_action_dec_mpls_ttl : of_action {
+    uint16_t type == 16;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_push_vlan {
-    uint16_t type;
+struct of_action_push_vlan : of_action {
+    uint16_t type == 17;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_pop_vlan {
-    uint16_t type;
+struct of_action_pop_vlan : of_action {
+    uint16_t type == 18;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_push_mpls {
-    uint16_t type;
+struct of_action_push_mpls : of_action {
+    uint16_t type == 19;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_pop_mpls {
-    uint16_t type;
+struct of_action_pop_mpls : of_action {
+    uint16_t type == 20;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_set_queue {
-    uint16_t type;
+struct of_action_set_queue : of_action {
+    uint16_t type == 21;
     uint16_t len;
     uint32_t queue_id;
 };
 
-struct ofp_action_group {
-    uint16_t type;
+struct of_action_group : of_action {
+    uint16_t type == 22;
     uint16_t len;
     uint32_t group_id;
 };
 
-struct ofp_action_set_nw_ttl {
-    uint16_t type;
+struct of_action_set_nw_ttl : of_action {
+    uint16_t type == 23;
     uint16_t len;
     uint8_t nw_ttl;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_dec_nw_ttl {
-    uint16_t type;
+struct of_action_dec_nw_ttl : of_action {
+    uint16_t type == 24;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_set_field {
-    uint16_t type;
+struct of_action_set_field(align=8, length_includes_align=True) : of_action {
+    uint16_t type == 25;
     uint16_t len;
-    of_octets_t field;
+    of_oxm_t field;
 };
 
-struct ofp_action_experimenter {
-    uint16_t type;
+struct of_action_experimenter(align=8, length_includes_align=True) : of_action {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     of_octets_t data;
 };
 
-struct ofp_action {
-    uint16_t type;
+struct of_action {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_instruction {
-    uint16_t type;
+struct of_instruction {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_instruction_goto_table {
-    uint16_t type;
+struct of_instruction_goto_table : of_instruction {
+    uint16_t type == 1;
     uint16_t len;
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_instruction_write_metadata {
-    uint16_t type;
+struct of_instruction_write_metadata : of_instruction {
+    uint16_t type == 2;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     uint64_t metadata;
     uint64_t metadata_mask;
 };
 
-struct ofp_instruction_write_actions {
-    uint16_t type;
+struct of_instruction_write_actions : of_instruction {
+    uint16_t type == 3;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     list(of_action_t) actions;
 };
 
-struct ofp_instruction_apply_actions {
-    uint16_t type;
+struct of_instruction_apply_actions : of_instruction {
+    uint16_t type == 4;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     list(of_action_t) actions;
 };
 
-struct ofp_instruction_clear_actions {
-    uint16_t type;
+struct of_instruction_clear_actions : of_instruction {
+    uint16_t type == 5;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_instruction_experimenter {
-    uint16_t type;		
+struct of_instruction_experimenter : of_instruction {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     of_octets_t data;
 };
 
-struct ofp_flow_add {
+struct of_flow_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == ?;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_modify {
+struct of_flow_add : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 0;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_modify_strict {
+struct of_flow_modify : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 1;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_delete {
+struct of_flow_modify_strict : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 2;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_delete_strict {
+struct of_flow_delete : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 3;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_bucket {
+struct of_flow_delete_strict : of_flow_mod {
+    uint8_t version;
+    uint8_t type == 14;
+    uint16_t length;
+    uint32_t xid;
+    uint64_t cookie;
+    uint64_t cookie_mask;
+    uint8_t table_id;
+    of_fm_cmd_t _command == 4;
+    uint16_t idle_timeout;
+    uint16_t hard_timeout;
+    uint16_t priority;
+    uint32_t buffer_id;
+    of_port_no_t out_port;
+    uint32_t out_group;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
+    of_match_t match;
+    list(of_instruction_t) instructions;
+};
+
+struct of_bucket {
     uint16_t len;
     uint16_t weight;
     of_port_no_t watch_port;
     uint32_t watch_group;
-    uint8_t[4] pad;
+    pad(4);
     list(of_action_t) actions;
 };
 
-struct ofp_group_mod {
+struct of_group_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 15;
     uint16_t length;
     uint32_t xid;
-    uint16_t command;
-    uint8_t group_type;
-    uint8_t pad;
+    enum ofp_group_mod_command command == ?;
+    enum ofp_group_type group_type;
+    pad(1);
     uint32_t group_id;
     list(of_bucket_t) buckets;
 };
 
-struct ofp_packet_out {
+struct of_group_add : of_group_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 15;
+    uint16_t length;
+    uint32_t xid;
+    enum ofp_group_mod_command command == 0;
+    enum ofp_group_type group_type;
+    pad(1);
+    uint32_t group_id;
+    list(of_bucket_t) buckets;
+};
+
+struct of_group_modify : of_group_mod {
+    uint8_t version;
+    uint8_t type == 15;
+    uint16_t length;
+    uint32_t xid;
+    enum ofp_group_mod_command command == 1;
+    enum ofp_group_type group_type;
+    pad(1);
+    uint32_t group_id;
+    list(of_bucket_t) buckets;
+};
+
+struct of_group_delete : of_group_mod {
+    uint8_t version;
+    uint8_t type == 15;
+    uint16_t length;
+    uint32_t xid;
+    enum ofp_group_mod_command command == 2;
+    enum ofp_group_type group_type;
+    pad(1);
+    uint32_t group_id;
+    list(of_bucket_t) buckets;
+};
+
+struct of_packet_out : of_header {
+    uint8_t version;
+    uint8_t type == 13;
     uint16_t length;
     uint32_t xid;
     uint32_t buffer_id;
     of_port_no_t in_port;
     uint16_t actions_len;
-    uint8_t[6] pad;
+    pad(6);
     list(of_action_t) actions;
     of_octets_t data;
 };
 
-struct ofp_packet_in {
+struct of_packet_in : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 10;
     uint16_t length;
     uint32_t xid;
     uint32_t buffer_id;
@@ -911,13 +964,13 @@
     uint8_t reason;
     uint8_t table_id;
     of_match_t match;
-    uint8_t[2] pad;
+    pad(2);
     of_octets_t data; /* FIXME: Ensure total_len gets updated */
 };
 
-struct ofp_flow_removed {
+struct of_flow_removed : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 11;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
@@ -933,40 +986,158 @@
     of_match_t match;
 };
 
-struct ofp_error_msg {
+struct of_error_msg : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 1;
     uint16_t length;
     uint32_t xid;
-    uint16_t err_type;
-    uint16_t code;
+    uint16_t err_type == ?;
+};
+
+struct of_hello_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 0;
+    enum ofp_hello_failed_code code;
     of_octets_t data;
 };
 
-// struct ofp_error_experimenter_msg {
-//    uint8_t version;
-//    uint8_t type;
-//    uint16_t length;
-//    uint32_t xid;
-//    uint16_t err_type;
-//    uint16_t subtype;
-//    uint32_t experimenter;
-//    of_octets_t data;
-//};
+struct of_bad_request_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 1;
+    enum ofp_bad_request_code code;
+    of_octets_t data;
+};
+
+struct of_bad_action_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 2;
+    enum ofp_bad_action_code code;
+    of_octets_t data;
+};
+
+struct of_bad_instruction_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 3;
+    enum ofp_bad_instruction_code code;
+    of_octets_t data;
+};
+
+struct of_bad_match_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 4;
+    enum ofp_bad_match_code code;
+    of_octets_t data;
+};
+
+struct of_flow_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 5;
+    enum ofp_flow_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_group_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 6;
+    enum ofp_group_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_port_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 7;
+    enum ofp_port_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_table_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 8;
+    enum ofp_table_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_queue_op_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 9;
+    enum ofp_queue_op_failed_code code;
+    of_octets_t data;
+};
+
+struct of_switch_config_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 10;
+    enum ofp_switch_config_failed_code code;
+    of_octets_t data;
+};
+
+struct of_role_request_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 11;
+    enum ofp_role_request_failed_code code;
+    of_octets_t data;
+};
+
+struct of_experimenter_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 0xffff;
+    uint16_t subtype;
+    uint32_t experimenter;
+    of_octets_t data;
+};
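
The error family above replaces the old catch-all ofp_error_msg with one subclass per err_type value (0 for hello_failed through 11 for role_request_failed, plus 0xffff for experimenter errors), so a decoder can pick the concrete class from the fixed 10-byte prefix alone. The sketch below is illustrative Python under that assumption, not loxigen's generated bindings; the class-name strings simply mirror the declarations above.

    import struct

    # err_type discriminator -> error subclass, as pinned by the "err_type == N"
    # declarations above (hypothetical lookup table, not generated code).
    ERROR_CLASSES = {
        0: "of_hello_failed_error_msg",
        1: "of_bad_request_error_msg",
        2: "of_bad_action_error_msg",
        3: "of_bad_instruction_error_msg",
        4: "of_bad_match_error_msg",
        5: "of_flow_mod_failed_error_msg",
        6: "of_group_mod_failed_error_msg",
        7: "of_port_mod_failed_error_msg",
        8: "of_table_mod_failed_error_msg",
        9: "of_queue_op_failed_error_msg",
        10: "of_switch_config_failed_error_msg",
        11: "of_role_request_failed_error_msg",
        0xFFFF: "of_experimenter_error_msg",
    }

    def classify_error(buf):
        # version, type, length, xid, err_type: 1+1+2+4+2 bytes, network byte order.
        version, msg_type, length, xid, err_type = struct.unpack_from("!BBHIH", buf, 0)
        assert msg_type == 1  # OFPT_ERROR
        return ERROR_CLASSES.get(err_type, "of_error_msg")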
 
 // STATS ENTRIES: flow, table, port, queue, group stats, group desc stats
 // FIXME: Verify disambiguation w/ length in object and entry
 
-struct ofp_flow_stats_entry {
+struct of_flow_stats_entry {
     uint16_t length;
     uint8_t table_id;
-    uint8_t pad;
+    pad(1);
     uint32_t duration_sec;
     uint32_t duration_nsec;
     uint16_t priority;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
-    uint8_t[6] pad2;
+    pad(6);
     uint64_t cookie;
     uint64_t packet_count;
     uint64_t byte_count;
@@ -974,9 +1145,9 @@
     list(of_instruction_t) instructions;
 };
 
-struct ofp_table_stats_entry {
+struct of_table_stats_entry {
     uint8_t table_id;
-    uint8_t[7] pad;
+    pad(7);
     of_table_name_t name;
     of_match_bmap_t match;
     of_wc_bmap_t wildcards;
@@ -994,9 +1165,9 @@
     uint64_t matched_count;
 };
 
-struct ofp_port_stats_entry {
+struct of_port_stats_entry {
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
     uint64_t rx_packets;
     uint64_t tx_packets;
     uint64_t rx_bytes;
@@ -1011,7 +1182,7 @@
     uint64_t collisions;
 };
 
-struct ofp_queue_stats_entry {
+struct of_queue_stats_entry {
     of_port_no_t port_no;
     uint32_t queue_id;
     uint64_t tx_bytes;
@@ -1019,26 +1190,26 @@
     uint64_t tx_errors;
 };
 
-struct ofp_bucket_counter {
+struct of_bucket_counter {
     uint64_t packet_count;
     uint64_t byte_count;
 };
 
-struct ofp_group_stats_entry {
+struct of_group_stats_entry {
     uint16_t length;
-    uint8_t[2] pad;
+    pad(2);
     uint32_t group_id;
     uint32_t ref_count;
-    uint8_t[4] pad2;
+    pad(4);
     uint64_t packet_count;
     uint64_t byte_count;
     list(of_bucket_counter_t) bucket_stats;
 };
 
-struct ofp_group_desc_stats_entry {
+struct of_group_desc_stats_entry {
     uint16_t length;
-    uint8_t type;
-    uint8_t pad;
+    enum ofp_group_type group_type;
+    pad(1);
     uint32_t group_id;
     list(of_bucket_t) buckets;
 };
@@ -1046,24 +1217,44 @@
 // STATS: 
 //  Desc, flow, agg, table, port, queue, group, group_desc, group_feat, experi
 
-struct ofp_desc_stats_request {
+struct of_stats_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == ?;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_desc_stats_reply {
+struct of_stats_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == ?;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+};
+
+struct of_desc_stats_request : of_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+};
+
+struct of_desc_stats_reply : of_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     of_desc_str_t mfr_desc;
     of_desc_str_t hw_desc;
     of_desc_str_t sw_desc;
@@ -1071,196 +1262,196 @@
     of_desc_str_t dp_desc;
 };
 
-struct ofp_flow_stats_request {
+struct of_flow_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 1;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     of_port_no_t out_port;
     uint32_t out_group;
-    uint8_t[4] pad2;
+    pad(4);
     uint64_t cookie;
     uint64_t cookie_mask;
     of_match_t match;
 };
 
-struct ofp_flow_stats_reply {
+struct of_flow_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 1;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_flow_stats_entry_t) entries;
 };
 
-struct ofp_aggregate_stats_request {
+struct of_aggregate_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 2;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     of_port_no_t out_port;
     uint32_t out_group;
-    uint8_t[4] pad2;
+    pad(4);
     uint64_t cookie;
     uint64_t cookie_mask;
     of_match_t match;
 };
 
-struct ofp_aggregate_stats_reply {
+struct of_aggregate_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 2;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     uint64_t packet_count;
     uint64_t byte_count;
     uint32_t flow_count;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_table_stats_request {
+struct of_table_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 3;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_table_stats_reply {
+struct of_table_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 3;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_table_stats_entry_t) entries;
 };
 
-struct ofp_port_stats_request {
+struct of_port_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 4;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_port_stats_reply {
+struct of_port_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 4;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_port_stats_entry_t) entries;
 };
 
-struct ofp_queue_stats_request {
+struct of_queue_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 5;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     of_port_no_t port_no;
     uint32_t queue_id;
 };
 
-struct ofp_queue_stats_reply {
+struct of_queue_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 5;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_queue_stats_entry_t) entries;
 };
 
-struct ofp_group_stats_request {
+struct of_group_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 6;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint32_t group_id;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_group_stats_reply {
+struct of_group_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 6;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_group_stats_entry_t) entries;
 };
 
-struct ofp_group_desc_stats_request {
+struct of_group_desc_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 7;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_group_desc_stats_reply {
+struct of_group_desc_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 7;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_group_desc_stats_entry_t) entries;
 };
 
-struct ofp_group_features_stats_request {
+struct of_group_features_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 8;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_group_features_stats_reply {
+struct of_group_features_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 8;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     uint32_t types;
     uint32_t capabilities;
     uint32_t max_groups_all;
@@ -1273,106 +1464,108 @@
     uint32_t actions_ff;
 };
 
-struct ofp_experimenter_stats_request {
+struct of_experimenter_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
-    uint32_t experimenter;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == ?;
     uint32_t subtype;
     of_octets_t data;
 };
 
-struct ofp_experimenter_stats_reply {
+struct of_experimenter_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
-    uint32_t experimenter;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == ?;
     uint32_t subtype;
     of_octets_t data;
 };
 
 // END OF STATS OBJECTS
 
-struct ofp_queue_prop {
-    uint16_t type;
+struct of_queue_prop {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_queue_prop_min_rate {
-    uint16_t type;
+struct of_queue_prop_min_rate : of_queue_prop {
+    uint16_t type == 1;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     uint16_t rate;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_queue_prop_max_rate {
-    uint16_t type;
+struct of_queue_prop_max_rate : of_queue_prop {
+    uint16_t type == 2;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     uint16_t rate;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_queue_prop_experimenter {
-    uint16_t type;
+struct of_queue_prop_experimenter : of_queue_prop {
+    uint16_t type == 65535;
     uint16_t len;
-    uint8_t[4] pad;
-    uint32_t experimenter;
-    uint8_t[4] pad;
+    pad(4);
+    uint32_t experimenter == ?;
+    pad(4);
     of_octets_t data;
 };
 
-struct ofp_packet_queue {
+struct of_packet_queue {
     uint32_t queue_id;
     of_port_no_t port;
     uint16_t len;
-    uint8_t[6] pad;
+    pad(6);
     list(of_queue_prop_t) properties;
 };
 
-struct ofp_queue_get_config_request {
+struct of_queue_get_config_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 22;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_queue_get_config_reply {
+struct of_queue_get_config_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 23;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port;
-    uint8_t[4] pad;
+    pad(4);
     list(of_packet_queue_t) queues;
 };
 
-struct ofp_role_request {
+struct of_role_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 24;
     uint16_t length;
     uint32_t xid;
-    uint32_t role;
-    uint8_t[4] pad;
+    enum ofp_controller_role role;
+    pad(4);
     uint64_t generation_id;
 };
 
-struct ofp_role_reply {
+struct of_role_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 25;
     uint16_t length;
     uint32_t xid;
-    of_octets_t data;
+    enum ofp_controller_role role;
+    pad(4);
+    uint64_t generation_id;
 };
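
The standard-1.3 input below is converted the same way. In both files every message keeps the common four-field header (version, type, length, xid), and each concrete message pins its type field to an OFPT_ number via "type == N", which is what lets the parent/child declarations act as a dispatch table. As a rough illustration (plain Python over an assumed big-endian buffer, not the generated bindings), reading that 8-byte header is enough to name the message class:

    import struct

    OFP_HEADER = struct.Struct("!BBHI")  # version, type, length, xid: 8 bytes

    # A few of the OFPT_ numbers pinned by "type == N" in the declarations
    # above and below (illustrative subset).
    MESSAGE_CLASSES = {
        0: "of_hello",
        1: "of_error_msg",
        10: "of_packet_in",
        13: "of_packet_out",
        14: "of_flow_mod",
        15: "of_group_mod",
        18: "of_stats_request",
        19: "of_stats_reply",
    }

    def classify_message(buf):
        version, msg_type, length, xid = OFP_HEADER.unpack_from(buf, 0)
        # "length" covers the whole message, including this header.
        return MESSAGE_CLASSES.get(msg_type, "of_header"), version, length, xid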
diff --git a/openflow_input/standard-1.3 b/openflow_input/standard-1.3
index bf7503b..af7e501 100644
--- a/openflow_input/standard-1.3
+++ b/openflow_input/standard-1.3
@@ -35,8 +35,8 @@
     OFP_MAX_TABLE_NAME_LEN = 32,
     OFP_MAX_PORT_NAME_LEN = 16,
 
-    OFP_TCP_PORT = 6633,
-    OFP_SSL_PORT = 6633,
+    OFP_TCP_PORT = 6653,
+    OFP_SSL_PORT = 6653,
 
     OFP_ETH_ALEN = 6,
 
@@ -58,11 +58,7 @@
     OFPQ_MIN_RATE_UNCFG = 0xffff,
 };
 
-enum ofp_port_no {
-    OFPP_ANY = 0xffffffff,
-};
-
-enum ofp_port {
+enum ofp_port(wire_type=uint32_t) {
     OFPP_MAX = 0xffffff00,
     OFPP_IN_PORT = 0xfffffff8,
     OFPP_TABLE = 0xfffffff9,
@@ -71,9 +67,10 @@
     OFPP_ALL = 0xfffffffc,
     OFPP_CONTROLLER = 0xfffffffd,
     OFPP_LOCAL = 0xfffffffe,
+    OFPP_ANY = 0xffffffff,
 };
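
The port enum now carries its wire_type (uint32_t) and folds OFPP_ANY in with the other reserved values, so anything above OFPP_MAX names a logical port rather than a physical one. A small hedged helper in Python (names copied from the enum values visible above; the helper itself is not part of loxigen) makes the split explicit:

    OFPP_MAX = 0xffffff00

    # Reserved logical ports from the enum above (subset shown in this hunk).
    RESERVED_PORTS = {
        0xfffffff8: "OFPP_IN_PORT",
        0xfffffff9: "OFPP_TABLE",
        0xfffffffc: "OFPP_ALL",
        0xfffffffd: "OFPP_CONTROLLER",
        0xfffffffe: "OFPP_LOCAL",
        0xffffffff: "OFPP_ANY",
    }

    def describe_port(port_no):
        if port_no in RESERVED_PORTS:
            return RESERVED_PORTS[port_no]
        return "physical port %d" % port_no if port_no <= OFPP_MAX else "reserved"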
 
-enum ofp_type {
+enum ofp_type(wire_type=uint8_t) {
     OFPT_HELLO = 0,
     OFPT_ERROR = 1,
     OFPT_ECHO_REQUEST = 2,
@@ -92,8 +89,8 @@
     OFPT_GROUP_MOD = 15,
     OFPT_PORT_MOD = 16,
     OFPT_TABLE_MOD = 17,
-    OFPT_MULTIPART_REQUEST = 18,
-    OFPT_MULTIPART_REPLY = 19,
+    OFPT_STATS_REQUEST = 18,
+    OFPT_STATS_REPLY = 19,
     OFPT_BARRIER_REQUEST = 20,
     OFPT_BARRIER_REPLY = 21,
     OFPT_QUEUE_GET_CONFIG_REQUEST = 22,
@@ -106,23 +103,23 @@
     OFPT_METER_MOD = 29,
 };
 
-enum ofp_config_flags {
+enum ofp_config_flags(wire_type=uint16_t, bitmask=True) {
     OFPC_FRAG_NORMAL = 0,
     OFPC_FRAG_DROP = 1,
     OFPC_FRAG_REASM = 2,
     OFPC_FRAG_MASK = 3,
 };
 
-enum ofp_table_config {
+enum ofp_table_config(wire_type=uint32_t, bitmask=True) {
     OFPTC_DEPRECATED_MASK = 0x3,
 };
 
-enum ofp_table {
+enum ofp_table(wire_type=uint8_t, complete=False) {
     OFPTT_MAX = 0xfe,
     OFPTT_ALL = 0xff,
 };
 
-enum ofp_capabilities {
+enum ofp_capabilities(wire_type=uint32_t, bitmask=True) {
     OFPC_FLOW_STATS = 0x1,
     OFPC_TABLE_STATS = 0x2,
     OFPC_PORT_STATS = 0x4,
@@ -132,20 +129,21 @@
     OFPC_PORT_BLOCKED = 0x100,
 };
 
-enum ofp_port_config {
+enum ofp_port_config(wire_type=uint32_t, bitmask=True) {
     OFPPC_PORT_DOWN = 0x1,
     OFPPC_NO_RECV = 0x4,
     OFPPC_NO_FWD = 0x20,
     OFPPC_NO_PACKET_IN = 0x40,
+    OFPPC_BSN_MIRROR_DEST = 0x80000000,
 };
 
-enum ofp_port_state {
+enum ofp_port_state(wire_type=uint32_t, bitmask=True) {
     OFPPS_LINK_DOWN = 0x1,
     OFPPS_BLOCKED = 0x2,
     OFPPS_LIVE = 0x4,
 };
 
-enum ofp_port_features {
+enum ofp_port_features(wire_type=uint32_t, bitmask=True) {
     OFPPF_10MB_HD = 0x1,
     OFPPF_10MB_FD = 0x2,
     OFPPF_100MB_HD = 0x4,
@@ -164,30 +162,32 @@
     OFPPF_PAUSE_ASYM = 0x8000,
 };
 
-enum ofp_port_reason {
+enum ofp_port_reason(wire_type=uint8_t) {
     OFPPR_ADD = 0,
     OFPPR_DELETE = 1,
     OFPPR_MODIFY = 2,
 };
 
-enum ofp_match_type {
+enum ofp_match_type(wire_type=uint16_t) {
     OFPMT_STANDARD = 0,
     OFPMT_OXM = 1,
 };
 
-enum ofp_oxm_class {
+enum ofp_oxm_class(wire_type=uint16_t) {
     OFPXMC_NXM_0 = 0,
     OFPXMC_NXM_1 = 1,
     OFPXMC_OPENFLOW_BASIC = 0x8000,
     OFPXMC_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_vlan_id {
+enum ofp_vlan_id(wire_type=uint16_t) {
     OFPVID_NONE = 0,
     OFPVID_PRESENT = 0x1000,
 };
 
-enum ofp_ipv6exthdr_flags {
+// FIXME: the OF spec defines this as a 9-bit field, implicitly
+// padded out to full bytes
+enum ofp_ipv6exthdr_flags(wire_type=uint16_t, bitmask=True) {
     OFPIEH_NONEXT = 0x1,
     OFPIEH_ESP = 0x2,
     OFPIEH_AUTH = 0x4,
@@ -199,7 +199,7 @@
     OFPIEH_UNSEQ = 0x100,
 };
 
-enum ofp_action_type {
+enum ofp_action_type(wire_type=uint16_t) {
     OFPAT_OUTPUT = 0,
     OFPAT_COPY_TTL_OUT = 0xb,
     OFPAT_COPY_TTL_IN = 0xc,
@@ -219,12 +219,12 @@
     OFPAT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_controller_max_len {
+enum ofp_controller_max_len(wire_type=uint16_t, complete=False) {
     OFPCML_MAX = 0xffe5,
     OFPCML_NO_BUFFER = 0xffff,
 };
 
-enum ofp_instruction_type {
+enum ofp_instruction_type(wire_type=uint16_t, bitmask=True) {
     OFPIT_GOTO_TABLE = 0x1,
     OFPIT_WRITE_METADATA = 0x2,
     OFPIT_WRITE_ACTIONS = 0x3,
@@ -234,7 +234,7 @@
     OFPIT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_flow_mod_command {
+enum ofp_flow_mod_command(wire_type=uint8_t) {
     OFPFC_ADD = 0,
     OFPFC_MODIFY = 1,
     OFPFC_MODIFY_STRICT = 2,
@@ -242,73 +242,92 @@
     OFPFC_DELETE_STRICT = 4,
 };
 
-enum ofp_flow_mod_flags {
+enum ofp_flow_mod_flags(wire_type=uint16_t, bitmask=True) {
     OFPFF_SEND_FLOW_REM = 0x1,
     OFPFF_CHECK_OVERLAP = 0x2,
     OFPFF_RESET_COUNTS = 0x4,
     OFPFF_NO_PKT_COUNTS = 0x8,
     OFPFF_NO_BYT_COUNTS = 0x10,
+
+    /* Non-standard, enabled by an experimenter message */
+    /* See the bsn_flow_idle input file */
+    OFPFF_BSN_SEND_IDLE = 0x80,
 };
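
Because ofp_flow_mod_flags is declared with bitmask=True, the 16-bit wire value is an OR of the listed bits rather than a single code, and the non-standard OFPFF_BSN_SEND_IDLE bit rides alongside the standard ones. A minimal illustrative decoder in Python (the mapping mirrors the enum above; the function itself is not loxigen output):

    OFP_FLOW_MOD_FLAGS = {
        0x01: "OFPFF_SEND_FLOW_REM",
        0x02: "OFPFF_CHECK_OVERLAP",
        0x04: "OFPFF_RESET_COUNTS",
        0x08: "OFPFF_NO_PKT_COUNTS",
        0x10: "OFPFF_NO_BYT_COUNTS",
        0x80: "OFPFF_BSN_SEND_IDLE",  # non-standard BSN bit noted above
    }

    def decode_flow_mod_flags(value):
        # A bitmask enum decodes to the set of named bits present in the field.
        return [name for bit, name in OFP_FLOW_MOD_FLAGS.items() if value & bit]

    # decode_flow_mod_flags(0x03) -> ["OFPFF_SEND_FLOW_REM", "OFPFF_CHECK_OVERLAP"]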
 
-enum ofp_group {
+enum ofp_group(wire_type=uint32_t, complete=False) {
     OFPG_MAX = 0xffffff00,
     OFPG_ALL = 0xfffffffc,
     OFPG_ANY = 0xffffffff,
 };
 
-enum ofp_group_mod_command {
+enum ofp_group_mod_command(wire_type=uint16_t) {
     OFPGC_ADD = 0,
     OFPGC_MODIFY = 1,
     OFPGC_DELETE = 2,
 };
 
-enum ofp_group_type {
+enum ofp_group_type(wire_type=uint8_t) {
     OFPGT_ALL = 0,
     OFPGT_SELECT = 1,
     OFPGT_INDIRECT = 2,
     OFPGT_FF = 3,
 };
 
-enum ofp_packet_in_reason {
+enum ofp_packet_in_reason(wire_type=uint8_t) {
     OFPR_NO_MATCH = 0,
     OFPR_ACTION = 1,
     OFPR_INVALID_TTL = 2,
+
+    // non-standard BSN extensions. OF does not have a standard-conformant
+    // way to extend the set of packet_in reasons
+    OFPR_BSN_NEW_HOST = 128,
+    OFPR_BSN_STATION_MOVE = 129,
+    OFPR_BSN_BAD_VLAN = 130,
+    OFPR_BSN_DESTINATION_LOOKUP_FAILURE = 131,
+    OFPR_BSN_NO_ROUTE = 132,
+    OFPR_BSN_ICMP_ECHO_REQUEST = 133,
+    OFPR_BSN_DEST_NETWORK_UNREACHABLE = 134,
+    OFPR_BSN_DEST_HOST_UNREACHABLE = 135,
+    OFPR_BSN_DEST_PORT_UNREACHABLE = 136,
+    OFPR_BSN_FRAGMENTATION_REQUIRED = 137,
+    OFPR_BSN_ARP = 139,
+    OFPR_BSN_DHCP = 140,
 };
 
-enum ofp_flow_removed_reason {
+enum ofp_flow_removed_reason(wire_type=uint8_t) {
     OFPRR_IDLE_TIMEOUT = 0,
     OFPRR_HARD_TIMEOUT = 1,
     OFPRR_DELETE = 2,
     OFPRR_GROUP_DELETE = 3,
 };
 
-enum ofp_meter {
+enum ofp_meter(wire_type=uint32_t, complete=False) {
     OFPM_MAX = 0xffff0000,
     OFPM_SLOWPATH = 0xfffffffd,
     OFPM_CONTROLLER = 0xfffffffe,
     OFPM_ALL = 0xffffffff,
 };
 
-enum ofp_meter_band_type {
+enum ofp_meter_band_type(wire_type=uint16_t) {
     OFPMBT_DROP = 0x1,
     OFPMBT_DSCP_REMARK = 0x2,
     OFPMBT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_meter_mod_command {
+enum ofp_meter_mod_command(wire_type=uint16_t) {
     OFPMC_ADD = 0,
     OFPMC_MODIFY = 1,
     OFPMC_DELETE = 2,
 };
 
-enum ofp_meter_flags {
+enum ofp_meter_flags(wire_type=uint16_t, bitmask=True) {
     OFPMF_KBPS = 0x1,
     OFPMF_PKTPS = 0x2,
     OFPMF_BURST = 0x4,
     OFPMF_STATS = 0x8,
 };
 
-enum ofp_error_type {
+enum ofp_error_type(wire_type=uint16_t) {
     OFPET_HELLO_FAILED = 0,
     OFPET_BAD_REQUEST = 1,
     OFPET_BAD_ACTION = 2,
@@ -326,17 +345,17 @@
     OFPET_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_hello_failed_code {
+enum ofp_hello_failed_code(wire_type=uint16_t) {
     OFPHFC_INCOMPATIBLE = 0,
     OFPHFC_EPERM = 1,
 };
 
-enum ofp_bad_request_code {
+enum ofp_bad_request_code(wire_type=uint16_t) {
     OFPBRC_BAD_VERSION = 0,
     OFPBRC_BAD_TYPE = 1,
-    OFPBRC_BAD_MULTIPART = 2,
+    OFPBRC_BAD_STAT = 2,
     OFPBRC_BAD_EXPERIMENTER = 3,
-    OFPBRC_BAD_EXP_TYPE = 4,
+    OFPBRC_BAD_EXPERIMENTER_TYPE = 4,
     OFPBRC_EPERM = 5,
     OFPBRC_BAD_LEN = 6,
     OFPBRC_BUFFER_EMPTY = 7,
@@ -348,11 +367,11 @@
     OFPBRC_MULTIPART_BUFFER_OVERFLOW = 13,
 };
 
-enum ofp_bad_action_code {
+enum ofp_bad_action_code(wire_type=uint16_t) {
     OFPBAC_BAD_TYPE = 0,
     OFPBAC_BAD_LEN = 1,
     OFPBAC_BAD_EXPERIMENTER = 2,
-    OFPBAC_BAD_EXP_TYPE = 3,
+    OFPBAC_BAD_EXPERIMENTER_TYPE = 3,
     OFPBAC_BAD_OUT_PORT = 4,
     OFPBAC_BAD_ARGUMENT = 5,
     OFPBAC_EPERM = 6,
@@ -367,19 +386,19 @@
     OFPBAC_BAD_SET_ARGUMENT = 15,
 };
 
-enum ofp_bad_instruction_code {
+enum ofp_bad_instruction_code(wire_type=uint16_t) {
     OFPBIC_UNKNOWN_INST = 0,
     OFPBIC_UNSUP_INST = 1,
     OFPBIC_BAD_TABLE_ID = 2,
     OFPBIC_UNSUP_METADATA = 3,
     OFPBIC_UNSUP_METADATA_MASK = 4,
     OFPBIC_BAD_EXPERIMENTER = 5,
-    OFPBIC_BAD_EXP_TYPE = 6,
+    OFPBIC_BAD_EXPERIMENTER_TYPE = 6,
     OFPBIC_BAD_LEN = 7,
     OFPBIC_EPERM = 8,
 };
 
-enum ofp_bad_match_code {
+enum ofp_bad_match_code(wire_type=uint16_t) {
     OFPBMC_BAD_TYPE = 0,
     OFPBMC_BAD_LEN = 1,
     OFPBMC_BAD_TAG = 2,
@@ -394,7 +413,7 @@
     OFPBMC_EPERM = 11,
 };
 
-enum ofp_flow_mod_failed_code {
+enum ofp_flow_mod_failed_code(wire_type=uint16_t) {
     OFPFMFC_UNKNOWN = 0,
     OFPFMFC_TABLE_FULL = 1,
     OFPFMFC_BAD_TABLE_ID = 2,
@@ -405,7 +424,7 @@
     OFPFMFC_BAD_FLAGS = 7,
 };
 
-enum ofp_group_mod_failed_code {
+enum ofp_group_mod_failed_code(wire_type=uint16_t) {
     OFPGMFC_GROUP_EXISTS = 0,
     OFPGMFC_INVALID_GROUP = 1,
     OFPGMFC_WEIGHT_UNSUPPORTED = 2,
@@ -423,7 +442,7 @@
     OFPGMFC_EPERM = 14,
 };
 
-enum ofp_port_mod_failed_code {
+enum ofp_port_mod_failed_code(wire_type=uint16_t) {
     OFPPMFC_BAD_PORT = 0,
     OFPPMFC_BAD_HW_ADDR = 1,
     OFPPMFC_BAD_CONFIG = 2,
@@ -431,31 +450,31 @@
     OFPPMFC_EPERM = 4,
 };
 
-enum ofp_table_mod_failed_code {
+enum ofp_table_mod_failed_code(wire_type=uint16_t) {
     OFPTMFC_BAD_TABLE = 0,
     OFPTMFC_BAD_CONFIG = 1,
     OFPTMFC_EPERM = 2,
 };
 
-enum ofp_queue_op_failed_code {
+enum ofp_queue_op_failed_code(wire_type=uint16_t) {
     OFPQOFC_BAD_PORT = 0,
     OFPQOFC_BAD_QUEUE = 1,
     OFPQOFC_EPERM = 2,
 };
 
-enum ofp_switch_config_failed_code {
+enum ofp_switch_config_failed_code(wire_type=uint16_t) {
     OFPSCFC_BAD_FLAGS = 0,
     OFPSCFC_BAD_LEN = 1,
     OFPSCFC_EPERM = 2,
 };
 
-enum ofp_role_request_failed_code {
+enum ofp_role_request_failed_code(wire_type=uint16_t) {
     OFPRRFC_STALE = 0,
     OFPRRFC_UNSUP = 1,
     OFPRRFC_BAD_ROLE = 2,
 };
 
-enum ofp_meter_mod_failed_code {
+enum ofp_meter_mod_failed_code(wire_type=uint16_t) {
     OFPMMFC_UNKNOWN = 0,
     OFPMMFC_METER_EXISTS = 1,
     OFPMMFC_INVALID_METER = 2,
@@ -470,7 +489,7 @@
     OFPMMFC_OUT_OF_BANDS = 11,
 };
 
-enum ofp_table_features_failed_code {
+enum ofp_table_features_failed_code(wire_type=uint16_t) {
     OFPTFFC_BAD_TABLE = 0,
     OFPTFFC_BAD_METADATA = 1,
     OFPTFFC_BAD_TYPE = 2,
@@ -479,33 +498,33 @@
     OFPTFFC_EPERM = 5,
 };
 
-enum ofp_multipart_types {
-    OFPMP_DESC = 0,
-    OFPMP_FLOW = 1,
-    OFPMP_AGGREGATE = 2,
-    OFPMP_TABLE = 3,
-    OFPMP_PORT_STATS = 4,
-    OFPMP_QUEUE = 5,
-    OFPMP_GROUP = 6,
-    OFPMP_GROUP_DESC = 7,
-    OFPMP_GROUP_FEATURES = 8,
-    OFPMP_METER = 9,
-    OFPMP_METER_CONFIG = 10,
-    OFPMP_METER_FEATURES = 11,
-    OFPMP_TABLE_FEATURES = 12,
-    OFPMP_PORT_DESC = 13,
-    OFPMP_EXPERIMENTER = 0xffff,
+enum ofp_stats_type(wire_type=uint16_t) {
+    OFPST_DESC = 0,
+    OFPST_FLOW = 1,
+    OFPST_AGGREGATE = 2,
+    OFPST_TABLE = 3,
+    OFPST_PORT = 4,
+    OFPST_QUEUE = 5,
+    OFPST_GROUP = 6,
+    OFPST_GROUP_DESC = 7,
+    OFPST_GROUP_FEATURES = 8,
+    OFPST_METER = 9,
+    OFPST_METER_CONFIG = 10,
+    OFPST_METER_FEATURES = 11,
+    OFPST_TABLE_FEATURES = 12,
+    OFPST_PORT_DESC = 13,
+    OFPST_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_multipart_request_flags {
-    OFPMPF_REQ_MORE = 0x1,
+enum ofp_stats_request_flags(wire_type=uint16_t, bitmask=True) {
+    OFPSF_REQ_MORE = 0x1,
 };
 
-enum ofp_multipart_reply_flags {
-    OFPMPF_REPLY_MORE = 0x1,
+enum ofp_stats_reply_flags(wire_type=uint16_t, bitmask=True) {
+    OFPSF_REPLY_MORE = 0x1,
 };
 
-enum ofp_table_feature_prop_type {
+enum ofp_table_feature_prop_type(wire_type=uint16_t) {
     OFPTFPT_INSTRUCTIONS = 0,
     OFPTFPT_INSTRUCTIONS_MISS = 1,
     OFPTFPT_NEXT_TABLES = 2,
@@ -524,532 +543,592 @@
     OFPTFPT_EXPERIMENTER_MISS = 0xffff,
 };
 
-enum ofp_group_capabilities {
+enum ofp_group_capabilities(wire_type=uint32_t, bitmask=True) {
     OFPGFC_SELECT_WEIGHT = 0x1,
     OFPGFC_SELECT_LIVENESS = 0x2,
     OFPGFC_CHAINING = 0x4,
     OFPGFC_CHAINING_CHECKS = 0x8,
 };
 
-enum ofp_queue_properties {
+enum ofp_queue_properties(wire_type=uint16_t) {
     OFPQT_MIN_RATE = 0x1,
     OFPQT_MAX_RATE = 0x2,
     OFPQT_EXPERIMENTER = 0xffff,
 };
 
-enum ofp_controller_role {
+enum ofp_controller_role(wire_type=uint32_t) {
     OFPCR_ROLE_NOCHANGE = 0,
     OFPCR_ROLE_EQUAL = 1,
     OFPCR_ROLE_MASTER = 2,
     OFPCR_ROLE_SLAVE = 3,
 };
 
-struct ofp_header {
+enum ofp_hello_elem_type(wire_type=uint16_t) {
+    OFPHET_VERSIONBITMAP = 1,
+};
+
+/* XXX rename to of_message */
+struct of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == ?;
     uint16_t length;
     uint32_t xid;
 };
 
+struct of_uint64 {
+    uint64_t value;
+};
+
 // Special structures used for managing scalar list elements
-struct ofp_uint32 {
+struct of_uint32 {
     uint32_t value;
 };
 
 // Special structures used for managing scalar list elements
-struct ofp_uint8 {
+struct of_uint8 {
     uint8_t value;
 };
 
-struct ofp_hello_elem {
-    uint16_t type;
+struct of_hello_elem {
+    uint16_t type == ?;
     uint16_t length;
 };
 
-struct ofp_hello_elem_versionbitmap {
-    uint16_t type;
+struct of_hello_elem_versionbitmap : of_hello_elem {
+    uint16_t type == 1;
     uint16_t length;
     list(of_uint32_t) bitmaps;
 };
 
-struct ofp_hello {
+struct of_hello : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 0;
     uint16_t length;
     uint32_t xid;
     list(of_hello_elem_t) elements;
 };
 
-struct ofp_echo_request {
+struct of_echo_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 2;
     uint16_t length;
     uint32_t xid;
     of_octets_t data;
 };
 
-struct ofp_echo_reply {
+struct of_echo_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 3;
     uint16_t length;
     uint32_t xid;
     of_octets_t data;
 };
 
-struct ofp_experimenter {
+struct of_experimenter : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 4;
     uint16_t length;
     uint32_t xid;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     uint32_t subtype;
     of_octets_t data;
 };
 
-struct ofp_barrier_request {
+struct of_barrier_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 20;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_barrier_reply {
+struct of_barrier_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 21;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_get_config_request {
+struct of_get_config_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 7;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_get_config_reply {
+struct of_get_config_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 8;
     uint16_t length;
     uint32_t xid;
-    uint16_t flags;
+    enum ofp_config_flags flags;
     uint16_t miss_send_len;
 };
 
-struct ofp_set_config {
+struct of_set_config : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 9;
     uint16_t length;
     uint32_t xid;
-    uint16_t flags;
+    enum ofp_config_flags flags;
     uint16_t miss_send_len;
 };
 
-struct ofp_table_mod {
+struct of_table_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 17;
     uint16_t length;
     uint32_t xid;
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     uint32_t config;
 };
 
-struct ofp_port_desc {
+struct of_port_desc {
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
     of_mac_addr_t hw_addr;
-    uint8_t[2] pad2;
+    pad(2);
     of_port_name_t name;
-    uint32_t config;
-    uint32_t state;
-    uint32_t curr;
-    uint32_t advertised;
-    uint32_t supported;
-    uint32_t peer;
+    enum ofp_port_config config;
+    enum ofp_port_state state;
+    enum ofp_port_features curr;
+    enum ofp_port_features advertised;
+    enum ofp_port_features supported;
+    enum ofp_port_features peer;
     uint32_t curr_speed;
     uint32_t max_speed;
 };
 
-struct ofp_features_request {
+struct of_features_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 5;
     uint16_t length;
     uint32_t xid;
 };
 
-struct ofp_features_reply {
+struct of_features_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 6;
     uint16_t length;
     uint32_t xid;
     uint64_t datapath_id;
     uint32_t n_buffers;
     uint8_t n_tables;
     uint8_t auxiliary_id;
-    uint8_t[2] pad;
-    uint32_t capabilities;
+    pad(2);
+    enum ofp_capabilities capabilities;
     uint32_t reserved;
 };
 
-struct ofp_port_status {
+struct of_port_status : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 12;
     uint16_t length;
     uint32_t xid;
-    uint8_t reason;
-    uint8_t[7] pad;
+    enum ofp_port_reason reason;
+    pad(7);
     of_port_desc_t desc;
 };
 
-struct ofp_port_mod {
+struct of_port_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 16;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
     of_mac_addr_t hw_addr;
-    uint8_t[2] pad2;
+    pad(2);
     uint32_t config;
     uint32_t mask;
     uint32_t advertise;
-    uint8_t[4] pad3;
+    pad(4);
 };
 
 // FIXME Does this need to be v4?
-struct ofp_match_v3 {
-    uint16_t type;
+struct of_match_v3(align=8, length_includes_align=False) {
+    uint16_t type == 1;
     uint16_t length;
     list(of_oxm_t) oxm_list;
 };
 
-struct ofp_oxm_experimenter_header {
-    uint32_t oxm_header;
-    uint32_t experimenter;
-    of_octets_t data;
-};
-
 // This looks like an action header, but is standalone.  See 
 // ofp_table_features_prop_actions
-struct ofp_action_id {
+struct of_action_id {
     uint16_t type;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_output {
-    uint16_t type;
+struct of_action_output : of_action {
+    uint16_t type == 0;
     uint16_t len;
     of_port_no_t port;
     uint16_t max_len;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_action_copy_ttl_out {
-    uint16_t type;
+struct of_action_copy_ttl_out : of_action {
+    uint16_t type == 11;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_copy_ttl_in {
-    uint16_t type;
+struct of_action_copy_ttl_in : of_action {
+    uint16_t type == 12;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_set_mpls_ttl {
-    uint16_t type;
+struct of_action_set_mpls_ttl : of_action {
+    uint16_t type == 15;
     uint16_t len;
     uint8_t mpls_ttl;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_dec_mpls_ttl {
-    uint16_t type;
+struct of_action_dec_mpls_ttl : of_action {
+    uint16_t type == 16;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_push_vlan {
-    uint16_t type;
+struct of_action_push_vlan : of_action {
+    uint16_t type == 17;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_pop_vlan {
-    uint16_t type;
+struct of_action_pop_vlan : of_action {
+    uint16_t type == 18;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_push_mpls {
-    uint16_t type;
+struct of_action_push_mpls : of_action {
+    uint16_t type == 19;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_pop_mpls {
-    uint16_t type;
+struct of_action_pop_mpls : of_action {
+    uint16_t type == 20;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action_set_queue {
-    uint16_t type;
+struct of_action_set_queue : of_action {
+    uint16_t type == 21;
     uint16_t len;
     uint32_t queue_id;
 };
 
-struct ofp_action_group {
-    uint16_t type;
+struct of_action_group : of_action {
+    uint16_t type == 22;
     uint16_t len;
     uint32_t group_id;
 };
 
-struct ofp_action_set_nw_ttl {
-    uint16_t type;
+struct of_action_set_nw_ttl : of_action {
+    uint16_t type == 23;
     uint16_t len;
     uint8_t nw_ttl;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_action_dec_nw_ttl {
-    uint16_t type;
+struct of_action_dec_nw_ttl : of_action {
+    uint16_t type == 24;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_set_field {
-    uint16_t type;
+struct of_action_set_field(align=8, length_includes_align=True) : of_action {
+    uint16_t type == 25;
     uint16_t len;
-    of_octets_t field;
+    of_oxm_t field;
 };
 
-struct ofp_action_experimenter {
-    uint16_t type;
+struct of_action_experimenter(align=8, length_includes_align=True) : of_action {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     of_octets_t data;
 };
 
-struct ofp_action_pop_pbb {
-    uint16_t type;
+struct of_action_pop_pbb : of_action {
+    uint16_t type == 27;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_action_push_pbb {
-    uint16_t type;
+struct of_action_push_pbb : of_action {
+    uint16_t type == 26;
     uint16_t len;
     uint16_t ethertype;
-    uint8_t[2] pad;
+    pad(2);
 };
 
-struct ofp_action {
-    uint16_t type;
+struct of_action {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_instruction {
-    uint16_t type;
+struct of_instruction {
+    uint16_t type == ?;
     uint16_t len;
 };
 
-struct ofp_instruction_goto_table {
-    uint16_t type;
+struct of_instruction_goto_table : of_instruction {
+    uint16_t type == 1;
     uint16_t len;
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
 };
 
-struct ofp_instruction_write_metadata {
-    uint16_t type;
+struct of_instruction_write_metadata : of_instruction {
+    uint16_t type == 2;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     uint64_t metadata;
     uint64_t metadata_mask;
 };
 
-struct ofp_instruction_write_actions {
-    uint16_t type;
+struct of_instruction_write_actions : of_instruction {
+    uint16_t type == 3;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     list(of_action_t) actions;
 };
 
-struct ofp_instruction_apply_actions {
-    uint16_t type;
+struct of_instruction_apply_actions : of_instruction {
+    uint16_t type == 4;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     list(of_action_t) actions;
 };
 
-struct ofp_instruction_clear_actions {
-    uint16_t type;
+struct of_instruction_clear_actions : of_instruction {
+    uint16_t type == 5;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_instruction_meter {
-    uint16_t type;
+struct of_instruction_meter : of_instruction {
+    uint16_t type == 6;
     uint16_t len;
     uint32_t meter_id;
 };
 
-struct ofp_instruction_experimenter {
-    uint16_t type;		
+struct of_instruction_experimenter : of_instruction {
+    uint16_t type == 65535;
     uint16_t len;
-    uint32_t experimenter;
+    uint32_t experimenter == ?;
     of_octets_t data;
 };
 
-struct ofp_flow_add {
+struct of_flow_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == ?;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_modify {
+struct of_flow_add : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 0;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_modify_strict {
+struct of_flow_modify : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 1;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_delete {
+struct of_flow_modify_strict : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 2;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_flow_delete_strict {
+struct of_flow_delete : of_flow_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 14;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
     uint64_t cookie_mask;
     uint8_t table_id;
-    of_fm_cmd_t _command;
+    of_fm_cmd_t _command == 3;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
     uint16_t priority;
     uint32_t buffer_id;
     of_port_no_t out_port;
     uint32_t out_group;
-    uint16_t flags;
-    uint8_t[2] pad;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
     of_match_t match;
     list(of_instruction_t) instructions;
 };
 
-struct ofp_bucket {
+struct of_flow_delete_strict : of_flow_mod {
+    uint8_t version;
+    uint8_t type == 14;
+    uint16_t length;
+    uint32_t xid;
+    uint64_t cookie;
+    uint64_t cookie_mask;
+    uint8_t table_id;
+    of_fm_cmd_t _command == 4;
+    uint16_t idle_timeout;
+    uint16_t hard_timeout;
+    uint16_t priority;
+    uint32_t buffer_id;
+    of_port_no_t out_port;
+    uint32_t out_group;
+    enum ofp_flow_mod_flags flags;
+    pad(2);
+    of_match_t match;
+    list(of_instruction_t) instructions;
+};
+
+struct of_bucket {
     uint16_t len;
     uint16_t weight;
     of_port_no_t watch_port;
     uint32_t watch_group;
-    uint8_t[4] pad;
+    pad(4);
     list(of_action_t) actions;
 };
 
-struct ofp_group_mod {
+struct of_group_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 15;
     uint16_t length;
     uint32_t xid;
-    uint16_t command;
-    uint8_t group_type;
-    uint8_t pad;
+    enum ofp_group_mod_command command == ?;
+    enum ofp_group_type group_type;
+    pad(1);
     uint32_t group_id;
     list(of_bucket_t) buckets;
 };
 
-struct ofp_packet_out {
+struct of_group_add : of_group_mod {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 15;
+    uint16_t length;
+    uint32_t xid;
+    enum ofp_group_mod_command command == 0;
+    enum ofp_group_type group_type;
+    pad(1);
+    uint32_t group_id;
+    list(of_bucket_t) buckets;
+};
+
+struct of_group_modify : of_group_mod {
+    uint8_t version;
+    uint8_t type == 15;
+    uint16_t length;
+    uint32_t xid;
+    enum ofp_group_mod_command command == 1;
+    enum ofp_group_type group_type;
+    pad(1);
+    uint32_t group_id;
+    list(of_bucket_t) buckets;
+};
+
+struct of_group_delete : of_group_mod {
+    uint8_t version;
+    uint8_t type == 15;
+    uint16_t length;
+    uint32_t xid;
+    enum ofp_group_mod_command command == 2;
+    enum ofp_group_type group_type;
+    pad(1);
+    uint32_t group_id;
+    list(of_bucket_t) buckets;
+};
+
+struct of_packet_out : of_header {
+    uint8_t version;
+    uint8_t type == 13;
     uint16_t length;
     uint32_t xid;
     uint32_t buffer_id;
     of_port_no_t in_port;
     uint16_t actions_len;
-    uint8_t[6] pad;
+    pad(6);
     list(of_action_t) actions;
     of_octets_t data;
 };
 
-struct ofp_packet_in {
+struct of_packet_in : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 10;
     uint16_t length;
     uint32_t xid;
     uint32_t buffer_id;
@@ -1058,13 +1137,13 @@
     uint8_t table_id;
     uint64_t cookie;
     of_match_t match;
-    uint8_t[2] pad;
+    pad(2);
     of_octets_t data; /* FIXME: Ensure total_len gets updated */
 };
 
-struct ofp_flow_removed {
+struct of_flow_removed : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 11;
     uint16_t length;
     uint32_t xid;
     uint64_t cookie;
@@ -1080,41 +1159,41 @@
     of_match_t match;
 };
 
-struct ofp_meter_band {
-    uint16_t        type;
+struct of_meter_band {
+    uint16_t        type == ?;
     uint16_t        len;
 //    uint32_t        rate;  // These are excluded b/c this is the header
 //    uint32_t        burst_size;  // These are excluded b/c this is the header
 };
 
-struct ofp_meter_band_drop {
-    uint16_t        type;
+struct of_meter_band_drop : of_meter_band {
+    uint16_t        type == 1;
     uint16_t        len;
     uint32_t        rate;
     uint32_t        burst_size;
-    uint8_t[4]      pad;
+    pad(4);
 };
 
-struct ofp_meter_band_dscp_remark {
-    uint16_t        type;
+struct of_meter_band_dscp_remark : of_meter_band {
+    uint16_t        type == 2;
     uint16_t        len;
     uint32_t        rate;
     uint32_t        burst_size;
     uint8_t         prec_level;
-    uint8_t[3]      pad;
+    pad(3);
 };
 
-struct ofp_meter_band_experimenter {
-    uint16_t        type;
+struct of_meter_band_experimenter : of_meter_band {
+    uint16_t        type == 65535;
     uint16_t        len;
     uint32_t        rate;
     uint32_t        burst_size;
     uint32_t        experimenter;
 };
 
-struct ofp_meter_mod {
+struct of_meter_mod : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 29;
     uint16_t length;
     uint32_t xid;
     uint16_t command;
@@ -1123,39 +1202,178 @@
     list(of_meter_band_t) meters;
 };
 
-struct ofp_error_msg {
+struct of_error_msg : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 1;
     uint16_t length;
     uint32_t xid;
-    uint16_t err_type;
-    uint16_t code;
+    uint16_t err_type == ?;
+};
+
+struct of_hello_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 0;
+    enum ofp_hello_failed_code code;
     of_octets_t data;
 };
 
-//struct ofp_error_experimenter_msg {
-//    uint8_t version;
-//    uint8_t type;
-//    uint16_t length;
-//    uint32_t xid;
-//    uint16_t err_type;
-//    uint16_t subtype;
-//    uint32_t experimenter;
-//    of_octets_t data;
-//};
+struct of_bad_request_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 1;
+    enum ofp_bad_request_code code;
+    of_octets_t data;
+};
+
+struct of_bad_action_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 2;
+    enum ofp_bad_action_code code;
+    of_octets_t data;
+};
+
+struct of_bad_instruction_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 3;
+    enum ofp_bad_instruction_code code;
+    of_octets_t data;
+};
+
+struct of_bad_match_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 4;
+    enum ofp_bad_match_code code;
+    of_octets_t data;
+};
+
+struct of_flow_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 5;
+    enum ofp_flow_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_group_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 6;
+    enum ofp_group_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_port_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 7;
+    enum ofp_port_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_table_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 8;
+    enum ofp_table_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_queue_op_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 9;
+    enum ofp_queue_op_failed_code code;
+    of_octets_t data;
+};
+
+struct of_switch_config_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 10;
+    enum ofp_switch_config_failed_code code;
+    of_octets_t data;
+};
+
+struct of_role_request_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 11;
+    enum ofp_role_request_failed_code code;
+    of_octets_t data;
+};
+
+struct of_meter_mod_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 12;
+    enum ofp_meter_mod_failed_code code;
+    of_octets_t data;
+};
+
+struct of_table_features_failed_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 13;
+    enum ofp_table_features_failed_code code;
+    of_octets_t data;
+};
+
+struct of_experimenter_error_msg : of_error_msg {
+    uint8_t version;
+    uint8_t type == 1;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t err_type == 0xffff;
+    uint16_t subtype;
+    uint32_t experimenter;
+    of_octets_t data;
+};
 
 // STATS ENTRIES: flow, table, port, queue, group stats, group desc stats
 
-struct ofp_flow_stats_entry {
+struct of_flow_stats_entry {
     uint16_t length;
     uint8_t table_id;
-    uint8_t pad;
+    pad(1);
     uint32_t duration_sec;
     uint32_t duration_nsec;
     uint16_t priority;
     uint16_t idle_timeout;
     uint16_t hard_timeout;
-    uint8_t[6] pad2;
+    uint16_t flags;
+    pad(4);
     uint64_t cookie;
     uint64_t packet_count;
     uint64_t byte_count;
@@ -1164,17 +1382,17 @@
 };
 
 
-struct ofp_table_stats_entry {
+struct of_table_stats_entry {
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     uint32_t active_count;
     uint64_t lookup_count;
     uint64_t matched_count;
 };
 
-struct ofp_port_stats_entry {
+struct of_port_stats_entry {
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
     uint64_t rx_packets;
     uint64_t tx_packets;
     uint64_t rx_bytes;
@@ -1191,7 +1409,7 @@
     uint32_t duration_nsec;
 };
 
-struct ofp_queue_stats_entry {
+struct of_queue_stats_entry {
     of_port_no_t port_no;
     uint32_t queue_id;
     uint64_t tx_bytes;
@@ -1201,17 +1419,17 @@
     uint32_t duration_nsec;
 };
 
-struct ofp_bucket_counter {
+struct of_bucket_counter {
     uint64_t packet_count;
     uint64_t byte_count;
 };
 
-struct ofp_group_stats_entry {
+struct of_group_stats_entry {
     uint16_t length;
-    uint8_t[2] pad;
+    pad(2);
     uint32_t group_id;
     uint32_t ref_count;
-    uint8_t[4] pad;
+    pad(4);
     uint64_t packet_count;
     uint64_t byte_count;
     uint32_t duration_sec;
@@ -1219,10 +1437,10 @@
     list(of_bucket_counter_t) bucket_stats;
 };
 
-struct ofp_group_desc_stats_entry {
+struct of_group_desc_stats_entry {
     uint16_t length;
-    uint8_t type;
-    uint8_t pad;
+    enum ofp_group_type group_type;
+    pad(1);
     uint32_t group_id;
     list(of_bucket_t) buckets;
 };
@@ -1230,24 +1448,44 @@
 // STATS: 
 //  Desc, flow, agg, table, port, queue, group, group_desc, group_feat, experi
 
-struct ofp_desc_stats_request {
+struct of_stats_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == ?;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_desc_stats_reply {
+struct of_stats_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == ?;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+};
+
+struct of_desc_stats_request : of_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+};
+
+struct of_desc_stats_reply : of_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     of_desc_str_t mfr_desc;
     of_desc_str_t hw_desc;
     of_desc_str_t sw_desc;
@@ -1255,161 +1493,206 @@
     of_desc_str_t dp_desc;
 };
 
-struct ofp_flow_stats_request {
+struct of_flow_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 1;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     of_port_no_t out_port;
     uint32_t out_group;
-    uint8_t[4] pad2;
+    pad(4);
     uint64_t cookie;
     uint64_t cookie_mask;
     of_match_t match;
 };
 
-struct ofp_flow_stats_reply {
+struct of_flow_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 1;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_flow_stats_entry_t) entries;
 };
 
-struct ofp_aggregate_stats_request {
+struct of_aggregate_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 2;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint8_t table_id;
-    uint8_t[3] pad;
+    pad(3);
     of_port_no_t out_port;
     uint32_t out_group;
-    uint8_t[4] pad2;
+    pad(4);
     uint64_t cookie;
     uint64_t cookie_mask;
     of_match_t match;
 };
 
-struct ofp_aggregate_stats_reply {
+struct of_aggregate_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 2;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     uint64_t packet_count;
     uint64_t byte_count;
     uint32_t flow_count;
-    uint8_t[4] pad;
+    pad(4);
+};
+
+struct of_table_stats_request : of_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 3;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+};
+
+struct of_table_stats_reply : of_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 3;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    list(of_table_stats_entry_t) entries;
+};
+
+struct of_experimenter_stats_request : of_stats_request {
+    uint8_t version;
+    uint8_t type == 18;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_request_flags flags;
+    pad(4);
+    uint32_t experimenter == ?;
+    uint32_t subtype;
+};
+
+struct of_experimenter_stats_reply : of_stats_reply {
+    uint8_t version;
+    uint8_t type == 19;
+    uint16_t length;
+    uint32_t xid;
+    uint16_t stats_type == 0xffff;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
+    uint32_t experimenter == ?;
+    uint32_t subtype;
 };
 
 // FIXME: These are padded to 8-byte alignment beyond the indicated length
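 // (Illustrative example: a property whose length field reads 12 would be
 // followed by 4 padding bytes, occupying 16 bytes on the wire.)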
 
-struct ofp_table_feature_prop {
-    uint16_t         type;
+struct of_table_feature_prop {
+    uint16_t         type == ?;
     uint16_t         length;
 };
 
-struct ofp_table_feature_prop_instructions {
-    uint16_t         type;
+struct of_table_feature_prop_instructions : of_table_feature_prop {
+    uint16_t         type == 0;
     uint16_t         length;
     // FIXME Check if instruction_t is right for ids here
     list(of_instruction_t)   instruction_ids;
 };
 
-struct ofp_table_feature_prop_instructions_miss {
-    uint16_t         type;
+struct of_table_feature_prop_instructions_miss : of_table_feature_prop {
+    uint16_t         type == 1;
     uint16_t         length;
     list(of_instruction_t)   instruction_ids;
 };
 
-struct ofp_table_feature_prop_next_tables {
-    uint16_t         type;
+struct of_table_feature_prop_next_tables : of_table_feature_prop {
+    uint16_t         type == 2;
     uint16_t         length;
     list(of_uint8_t) next_table_ids;
 };
 
-struct ofp_table_feature_prop_next_tables_miss {
-    uint16_t         type;
+struct of_table_feature_prop_next_tables_miss : of_table_feature_prop {
+    uint16_t         type == 3;
     uint16_t         length;
     list(of_uint8_t) next_table_ids;
 };
 
-struct ofp_table_feature_prop_write_actions {
-    uint16_t         type;
+struct of_table_feature_prop_write_actions : of_table_feature_prop {
+    uint16_t         type == 4;
     uint16_t         length;
     list(of_action_id_t) action_ids;
 };
 
-struct ofp_table_feature_prop_write_actions_miss {
-    uint16_t         type;
+struct of_table_feature_prop_write_actions_miss : of_table_feature_prop {
+    uint16_t         type == 5;
     uint16_t         length;
     list(of_action_id_t) action_ids;
 };
 
-struct ofp_table_feature_prop_apply_actions {
-    uint16_t         type;
+struct of_table_feature_prop_apply_actions : of_table_feature_prop {
+    uint16_t         type == 6;
     uint16_t         length;
     list(of_action_id_t) action_ids;
 };
 
-struct ofp_table_feature_prop_apply_actions_miss {
-    uint16_t         type;
+struct of_table_feature_prop_apply_actions_miss : of_table_feature_prop {
+    uint16_t         type == 7;
     uint16_t         length;
     list(of_action_id_t) action_ids;
 };
 
-struct ofp_table_feature_prop_match {
-    uint16_t         type;
+struct of_table_feature_prop_match : of_table_feature_prop {
+    uint16_t         type == 8;
     uint16_t         length;
     list(of_uint32_t) oxm_ids;
 };
 
-struct ofp_table_feature_prop_wildcards {
-    uint16_t         type;
+struct of_table_feature_prop_wildcards : of_table_feature_prop {
+    uint16_t         type == 10;
     uint16_t         length;
     list(of_uint32_t) oxm_ids;
 };
 
-struct ofp_table_feature_prop_write_setfield {
-    uint16_t         type;
+struct of_table_feature_prop_write_setfield : of_table_feature_prop {
+    uint16_t         type == 12;
     uint16_t         length;
     list(of_uint32_t) oxm_ids;
 };
 
-struct ofp_table_feature_prop_write_setfield_miss {
-    uint16_t         type;
+struct of_table_feature_prop_write_setfield_miss : of_table_feature_prop {
+    uint16_t         type == 13;
     uint16_t         length;
     list(of_uint32_t) oxm_ids;
 };
 
-struct ofp_table_feature_prop_apply_setfield {
-    uint16_t         type;
+struct of_table_feature_prop_apply_setfield : of_table_feature_prop {
+    uint16_t         type == 14;
     uint16_t         length;
     list(of_uint32_t) oxm_ids;
 };
 
-struct ofp_table_feature_prop_apply_setfield_miss {
-    uint16_t         type;
+struct of_table_feature_prop_apply_setfield_miss : of_table_feature_prop {
+    uint16_t         type == 15;
     uint16_t         length;
     list(of_uint32_t) oxm_ids;
 };
 
-struct ofp_table_feature_prop_experimenter {
-    uint16_t         type;
+struct of_table_feature_prop_experimenter : of_table_feature_prop {
+    uint16_t         type == 65535;
     uint16_t         length;
     uint32_t         experimenter;
     uint32_t         subtype;
@@ -1417,7 +1700,7 @@
 };
 
 // Not yet supported
-// struct ofp_table_feature_prop_experimenter_miss {
+// struct of_table_feature_prop_experimenter_miss : of_table_feature_prop {
 //     uint16_t         type;
 //     uint16_t         length;
 //     uint32_t         experimenter;
@@ -1425,10 +1708,10 @@
 //     of_octets_t      experimenter_data;
 // };
 
-struct ofp_table_features {
+struct of_table_features {
     uint16_t length;
     uint8_t table_id;
-    uint8_t[5] pad;
+    pad(5);
     of_table_name_t name;
     uint64_t metadata_match;
     uint64_t metadata_write;
@@ -1437,123 +1720,123 @@
     list(of_table_feature_prop_t) properties;
 };
 
-struct ofp_meter_features {
+struct of_meter_features {
     uint32_t    max_meter;
     uint32_t    band_types;
     uint32_t    capabilities;
     uint8_t     max_bands;
     uint8_t     max_color;
-    uint8_t[2]     pad;
+    pad(2);
 };
 
-struct ofp_port_stats_request {
+struct of_port_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 4;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     of_port_no_t port_no;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_port_stats_reply {
+struct of_port_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 4;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_port_stats_entry_t) entries;
 };
 
-struct ofp_queue_stats_request {
+struct of_queue_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 5;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     of_port_no_t port_no;
     uint32_t queue_id;
 };
 
-struct ofp_queue_stats_reply {
+struct of_queue_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 5;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_queue_stats_entry_t) entries;
 };
 
-struct ofp_group_stats_request {
+struct of_group_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 6;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint32_t group_id;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_group_stats_reply {
+struct of_group_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 6;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_group_stats_entry_t) entries;
 };
 
-struct ofp_group_desc_stats_request {
+struct of_group_desc_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 7;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_group_desc_stats_reply {
+struct of_group_desc_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 7;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_group_desc_stats_entry_t) entries;
 };
 
-struct ofp_group_features_stats_request {
+struct of_group_features_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 8;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
-struct ofp_group_features_stats_reply {
+struct of_group_features_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 8;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     uint32_t types;
     uint32_t capabilities;
     uint32_t max_groups_all;
@@ -1566,131 +1849,131 @@
     uint32_t actions_ff;
 };
 
-struct ofp_meter_stats_request {
+struct of_meter_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 9;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint32_t meter_id;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_meter_stats_reply {
+struct of_meter_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 9;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_meter_stats_t) entries;
 };
 
-struct ofp_meter_config_stats_request {
+struct of_meter_config_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 10;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     uint32_t meter_id;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_meter_config_stats_reply {
+struct of_meter_config_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 10;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_meter_band_t) entries;
 };
 
 // FIXME stats added to get things working
-struct ofp_meter_features_stats_request {
+struct of_meter_features_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 11;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
 // FIXME stats added to get things working
-struct ofp_meter_features_stats_reply {
+struct of_meter_features_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 11;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     of_meter_features_t features;
 };
 
 // FIXME stats added to get things working
-struct ofp_table_features_stats_request {
+struct of_table_features_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 12;
+    enum ofp_stats_request_flags flags;
+    pad(4);
     list(of_table_features_t) entries;
 };
 
 // FIXME stats added to get things working
-struct ofp_table_features_stats_reply {
+struct of_table_features_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 12;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_table_features_t) entries;
 };
 
 // FIXME stats added to get things working
-struct ofp_port_desc_stats_request {
+struct of_port_desc_stats_request : of_stats_request {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 18;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 13;
+    enum ofp_stats_request_flags flags;
+    pad(4);
 };
 
 // FIXME stats added to get things working
-struct ofp_port_desc_stats_reply {
+struct of_port_desc_stats_reply : of_stats_reply {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 19;
     uint16_t length;
     uint32_t xid;
-    uint16_t stats_type;
-    uint16_t flags;
-    uint8_t[4] pad;
+    uint16_t stats_type == 13;
+    enum ofp_stats_reply_flags flags;
+    pad(4);
     list(of_port_desc_t) entries;
 };
 
-struct ofp_meter_band_stats {
+struct of_meter_band_stats {
     uint64_t        packet_band_count;
     uint64_t        byte_band_count;
 };
 
-struct ofp_meter_stats {
+struct of_meter_stats {
     uint32_t        meter_id;
     uint16_t        len;
-    uint8_t[6]         pad;
+    pad(6);
     uint32_t        flow_count;
     uint64_t        packet_in_count;
     uint64_t        byte_in_count;
@@ -1699,94 +1982,96 @@
     list(of_meter_band_stats_t) band_stats;
 };
 
-struct ofp_meter_config {
+struct of_meter_config {
     uint16_t        length;
     uint16_t        flags;
     uint32_t        meter_id;
     list(of_meter_band_t) entries;
 };
 
-struct ofp_experimenter_multipart_header {
-    uint32_t experimenter;
+struct of_experimenter_stats_header {
+    uint32_t experimenter == ?;
     uint32_t subtype;
 };
 
 // END OF STATS OBJECTS
 
-struct ofp_queue_prop {
-    uint16_t type;
+struct of_queue_prop {
+    uint16_t type == ?;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_queue_prop_min_rate {
-    uint16_t type;
+struct of_queue_prop_min_rate : of_queue_prop {
+    uint16_t type == 1;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     uint16_t rate;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_queue_prop_max_rate {
-    uint16_t type;
+struct of_queue_prop_max_rate : of_queue_prop {
+    uint16_t type == 2;
     uint16_t len;
-    uint8_t[4] pad;
+    pad(4);
     uint16_t rate;
-    uint8_t[6] pad;
+    pad(6);
 };
 
-struct ofp_queue_prop_experimenter {
-    uint16_t type;
+struct of_queue_prop_experimenter : of_queue_prop {
+    uint16_t type == 65535;
     uint16_t len;
-    uint8_t[4] pad;
-    uint32_t experimenter;
-    uint8_t[4] pad;
+    pad(4);
+    uint32_t experimenter == ?;
+    pad(4);
     of_octets_t data;
 };
 
-struct ofp_packet_queue {
+struct of_packet_queue {
     uint32_t queue_id;
     of_port_no_t port;
     uint16_t len;
-    uint8_t[6] pad;
+    pad(6);
     list(of_queue_prop_t) properties;
 };
 
-struct ofp_queue_get_config_request {
+struct of_queue_get_config_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 22;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port;
-    uint8_t[4] pad;
+    pad(4);
 };
 
-struct ofp_queue_get_config_reply {
+struct of_queue_get_config_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 23;
     uint16_t length;
     uint32_t xid;
     of_port_no_t port;
-    uint8_t[4] pad;
+    pad(4);
     list(of_packet_queue_t) queues;
 };
 
-struct ofp_role_request {
+struct of_role_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 24;
     uint16_t length;
     uint32_t xid;
-    uint32_t role;
-    uint8_t[4] pad;
+    enum ofp_controller_role role;
+    pad(4);
     uint64_t generation_id;
 };
 
-struct ofp_role_reply {
+struct of_role_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 25;
     uint16_t length;
     uint32_t xid;
-    of_octets_t data;
+    enum ofp_controller_role role;
+    pad(4);
+    uint64_t generation_id;
 };
 
 ////////////////////////////////////////////////////////////////
@@ -1796,9 +2081,9 @@
 //   while uint32_t[1] is interest for slave
 ////////////////////////////////////////////////////////////////
 
-struct ofp_async_get_request {
+struct of_async_get_request : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 26;
     uint16_t length;
     uint32_t xid;
     uint32_t packet_in_mask_equal_master;
@@ -1809,9 +2094,9 @@
     uint32_t flow_removed_mask_slave;
 };
 
-struct ofp_async_get_reply {
+struct of_async_get_reply : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 27;
     uint16_t length;
     uint32_t xid;
     uint32_t packet_in_mask_equal_master;
@@ -1822,9 +2107,9 @@
     uint32_t flow_removed_mask_slave;
 };
 
-struct ofp_async_set {
+struct of_async_set : of_header {
     uint8_t version;
-    uint8_t type;
+    uint8_t type == 28;
     uint16_t length;
     uint32_t xid;
     uint32_t packet_in_mask_equal_master;
diff --git a/py_gen/codegen.py b/py_gen/codegen.py
index 67529e0..44b4814 100644
--- a/py_gen/codegen.py
+++ b/py_gen/codegen.py
@@ -25,144 +25,110 @@
 # EPL for the specific language governing permissions and limitations
 # under the EPL.
 
-from collections import namedtuple
-import of_g
-import loxi_front_end.type_maps as type_maps
+from collections import defaultdict
+import loxi_globals
+import struct
+import template_utils
 import loxi_utils.loxi_utils as utils
 import util
 import oftype
+from loxi_ir import *
 
-OFClass = namedtuple('OFClass', ['name', 'pyname',
-                                 'members', 'length_member', 'type_members',
-                                 'min_length', 'is_fixed_length'])
-Member = namedtuple('Member', ['name', 'oftype', 'offset', 'skip'])
-LengthMember = namedtuple('LengthMember', ['name', 'oftype', 'offset'])
-TypeMember = namedtuple('TypeMember', ['name', 'oftype', 'offset', 'value'])
+modules_by_version = {}
 
-def get_type_values(cls, version):
-    """
-    Returns a map from the name of the type member to its value.
-    """
-    type_values = {}
+# Map from inheritance root to module name
+roots = {
+    'of_header': 'message',
+    'of_action': 'action',
+    'of_action_id': 'action_id',
+    'of_oxm': 'oxm',
+    'of_instruction': 'instruction',
+    'of_instruction_id': 'instruction_id',
+    'of_meter_band': 'meter_band',
+    'of_bsn_tlv': 'bsn_tlv',
+}
 
-    # Primary wire type
-    if utils.class_is_message(cls):
-        type_values['version'] = 'const.OFP_VERSION'
-        type_values['type'] = util.constant_for_value(version, "ofp_type", util.primary_wire_type(cls, version))
-        if cls in type_maps.flow_mod_list:
-            type_values['_command'] = util.constant_for_value(version, "ofp_flow_mod_command",
-                                                              type_maps.flow_mod_types[version][cls[8:]])
-        if cls in type_maps.stats_request_list:
-            type_values['stats_type'] = util.constant_for_value(version, "ofp_stats_types",
-                                                                type_maps.stats_types[version][cls[3:-14]])
-        if cls in type_maps.stats_reply_list:
-            type_values['stats_type'] = util.constant_for_value(version, "ofp_stats_types",
-                                                                type_maps.stats_types[version][cls[3:-12]])
-        if type_maps.message_is_extension(cls, version):
-            type_values['experimenter'] = '%#x' % type_maps.extension_to_experimenter_id(cls)
-            type_values['subtype'] = type_maps.extension_message_to_subtype(cls, version)
-    elif utils.class_is_action(cls):
-        type_values['type'] = util.constant_for_value(version, "ofp_action_type", util.primary_wire_type(cls, version))
-        if type_maps.action_is_extension(cls, version):
-            type_values['experimenter'] = '%#x' % type_maps.extension_to_experimenter_id(cls)
-            type_values['subtype'] = type_maps.extension_action_to_subtype(cls, version)
-    elif utils.class_is_queue_prop(cls):
-        type_values['type'] = util.constant_for_value(version, "ofp_queue_properties", util.primary_wire_type(cls, version))
-
-    return type_values
-
-# Create intermediate representation
-def build_ofclasses(version):
-    blacklist = ["of_action", "of_action_header", "of_header", "of_queue_prop",
-                 "of_queue_prop_header", "of_experimenter", "of_action_experimenter"]
-    ofclasses = []
-    for cls in of_g.standard_class_order:
-        if version not in of_g.unified[cls] or cls in blacklist:
-            continue
-        unified_class = util.lookup_unified_class(cls, version)
-
-        # Name for the generated Python class
-        if utils.class_is_action(cls):
-            pyname = cls[10:]
-        else:
-            pyname = cls[3:]
-
-        type_values = get_type_values(cls, version)
-        members = []
-
-        length_member = None
-        type_members = []
-        pad_count = 0
-
-        for member in unified_class['members']:
-            if member['name'] in ['length', 'len']:
-                length_member = LengthMember(name=member['name'],
-                                             offset=member['offset'],
-                                             oftype=oftype.OFType(member['m_type'], version))
-            elif member['name'] in type_values:
-                type_members.append(TypeMember(name=member['name'],
-                                               offset=member['offset'],
-                                               oftype=oftype.OFType(member['m_type'], version),
-                                               value=type_values[member['name']]))
+# Return the module and class names for the generated Python class
+def generate_pyname(ofclass):
+    for root, module_name in roots.items():
+        if ofclass.name == root:
+            return module_name, module_name
+        elif ofclass.is_instanceof(root):
+            if root == 'of_header':
+                # The input files don't prefix message names
+                return module_name, ofclass.name[3:]
             else:
-                # HACK ensure member names are unique
-                if member['name'].startswith("pad"):
-                    if pad_count == 0:
-                        m_name = 'pad'
-                    else:
-                        m_name = "pad%d" % pad_count
-                    pad_count += 1
-                else:
-                    m_name = member['name']
-                members.append(Member(name=m_name,
-                                      oftype=oftype.OFType(member['m_type'], version),
-                                      offset=member['offset'],
-                                      skip=member['name'] in of_g.skip_members))
+                return module_name, ofclass.name[len(root)+1:]
+    return 'common', ofclass.name[3:]
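+# Illustrative examples (assuming these classes exist in the target IR):
+#   of_flow_add      -> ('message', 'flow_add')   # subclass of of_header
+#   of_action_output -> ('action', 'output')      # subclass of of_action
+#   of_match_v3      -> ('common', 'match_v3')    # no inheritance root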
 
-        ofclasses.append(
-            OFClass(name=cls,
-                    pyname=pyname,
-                    members=members,
-                    length_member=length_member,
-                    type_members=type_members,
-                    min_length=of_g.base_length[(cls, version)],
-                    is_fixed_length=(cls, version) in of_g.is_fixed_length))
-    return ofclasses
+# Create intermediate representation, extended from the LOXI IR
+def build_ofclasses(version):
+    modules = defaultdict(list)
+    for ofclass in loxi_globals.ir[version].classes:
+        module_name, ofclass.pyname = generate_pyname(ofclass)
+        modules[module_name].append(ofclass)
+    return modules
 
 def generate_init(out, name, version):
-    util.render_template(out, 'init.py')
+    util.render_template(out, 'init.py', version=version)
 
 def generate_action(out, name, version):
-    ofclasses = [x for x in build_ofclasses(version)
-                 if utils.class_is_action(x.name)]
-    util.render_template(out, 'action.py', ofclasses=ofclasses)
+    util.render_template(out, 'module.py',
+                         ofclasses=modules_by_version[version]['action'],
+                         version=version)
+
+def generate_action_id(out, name, version):
+    util.render_template(out, 'module.py',
+                         ofclasses=modules_by_version[version]['action_id'],
+                         version=version)
+
+def generate_oxm(out, name, version):
+    util.render_template(out, 'module.py',
+                         ofclasses=modules_by_version[version]['oxm'],
+                         version=version)
 
 def generate_common(out, name, version):
-    ofclasses = [x for x in build_ofclasses(version)
-                 if not utils.class_is_message(x.name)
-                    and not utils.class_is_action(x.name)
-                    and not utils.class_is_list(x.name)]
-    util.render_template(out, 'common.py', ofclasses=ofclasses)
+    util.render_template(out, 'module.py',
+                         ofclasses=modules_by_version[version]['common'],
+                         version=version,
+                         extra_template='_common_extra.py')
 
 def generate_const(out, name, version):
-    groups = {}
-    for (group, idents) in of_g.identifiers_by_group.items():
-        items = []
-        for ident in idents:
-            info = of_g.identifiers[ident]
-            if version in info["values_by_version"]:
-                items.append((info["ofp_name"], info["values_by_version"][version]))
-        if items:
-            groups[group] = items
-    util.render_template(out, 'const.py', version=version, groups=groups)
+    util.render_template(out, 'const.py', version=version,
+                         enums=loxi_globals.ir[version].enums)
+
+def generate_instruction(out, name, version):
+    util.render_template(out, 'module.py',
+                         ofclasses=modules_by_version[version]['instruction'],
+                         version=version)
+
+def generate_instruction_id(out, name, version):
+    util.render_template(out, 'module.py',
+                         ofclasses=modules_by_version[version]['instruction_id'],
+                         version=version)
 
 def generate_message(out, name, version):
-    ofclasses = [x for x in build_ofclasses(version)
-                 if utils.class_is_message(x.name)]
-    util.render_template(out, 'message.py', ofclasses=ofclasses, version=version)
+    util.render_template(out, 'module.py',
+                         ofclasses=modules_by_version[version]['message'],
+                         version=version,
+                         extra_template='_message_extra.py')
+
+def generate_meter_band(out, name, version):
+    util.render_template(out, 'module.py',
+                         ofclasses=modules_by_version[version]['meter_band'],
+                         version=version)
 
 def generate_pp(out, name, version):
     util.render_template(out, 'pp.py')
 
 def generate_util(out, name, version):
-    util.render_template(out, 'util.py')
+    util.render_template(out, 'util.py', version=version)
+
+def generate_bsn_tlv(out, name, version):
+    util.render_template(out, 'module.py',
+                         ofclasses=modules_by_version[version]['bsn_tlv'],
+                         version=version)
+
+def init():
+    for version in loxi_globals.OFVersions.target_versions:
+        modules_by_version[version] = build_ofclasses(version)
diff --git a/py_gen/oftype.py b/py_gen/oftype.py
index ee5040c..3b56308 100644
--- a/py_gen/oftype.py
+++ b/py_gen/oftype.py
@@ -25,156 +25,180 @@
 # EPL for the specific language governing permissions and limitations
 # under the EPL.
 
-import of_g
-import loxi_utils.loxi_utils as utils
-import unittest
+from collections import namedtuple
 
-class OFType(object):
-    """
-    Encapsulates knowledge about the OpenFlow type system.
-    """
+import loxi_utils.loxi_utils as loxi_utils
+import py_gen.codegen
+import loxi_globals
 
-    version = None
-    base = None
-    is_array = False
-    array_length = None
+OFTypeData = namedtuple("OFTypeData", ["init", "pack", "unpack"])
 
-    def __init__(self, string, version):
-        self.version = version
-        self.array_length, self.base = utils.type_dec_to_count_base(string)
-        self.is_array = self.array_length != 1
+# Map from LOXI type name to an object with templates for init, pack, and unpack
+# Most types are defined using the convenience code below. This dict should
+# only be used directly for special cases such as primitive types.
+type_data_map = {
+    'char': OFTypeData(
+        init='0',
+        pack='struct.pack("!B", %s)',
+        unpack='%s.read("!B")[0]'),
 
-    def gen_init_expr(self):
-        if utils.class_is_list(self.base):
-            v = "[]"
-        elif self.base.find("uint") == 0 or self.base in ["char", "of_port_no_t"]:
-            v = "0"
-        elif self.base == 'of_mac_addr_t':
-            v = '[0,0,0,0,0,0]'
-        elif self.base == 'of_wc_bmap_t':
-            v = 'const.OFPFW_ALL'
-        elif self.base in ['of_octets_t', 'of_port_name_t', 'of_table_name_t',
-                           'of_desc_str_t', 'of_serial_num_t']:
-            v = '""'
-        elif self.base == 'of_match_t':
-            v = 'common.match()'
-        elif self.base == 'of_port_desc_t':
-            v = 'common.port_desc()'
-        else:
-            v = "None"
+    'uint8_t': OFTypeData(
+        init='0',
+        pack='struct.pack("!B", %s)',
+        unpack='%s.read("!B")[0]'),
 
-        if self.is_array:
-            return "[" + ','.join([v] * self.array_length) + "]"
-        else:
-            return v
+    'uint16_t': OFTypeData(
+        init='0',
+        pack='struct.pack("!H", %s)',
+        unpack='%s.read("!H")[0]'),
 
-    def gen_pack_expr(self, expr_expr):
-        pack_fmt = self._pack_fmt()
-        if pack_fmt and not self.is_array:
-            return 'struct.pack("!%s", %s)' % (pack_fmt, expr_expr)
-        elif pack_fmt and self.is_array:
-            return 'struct.pack("!%s%s", *%s)' % (self.array_length, pack_fmt, expr_expr)
-        elif self.base == 'of_octets_t':
-            return expr_expr
-        elif utils.class_is_list(self.base):
-            return '"".join([x.pack() for x in %s])' % expr_expr
-        elif self.base == 'of_mac_addr_t':
-            return 'struct.pack("!6B", *%s)' % expr_expr
-        elif self.base in ['of_match_t', 'of_port_desc_t']:
-            return '%s.pack()' % expr_expr
-        elif self.base == 'of_port_name_t':
-            return self._gen_string_pack_expr(16, expr_expr)
-        elif self.base == 'of_table_name_t' or self.base == 'of_serial_num_t':
-            return self._gen_string_pack_expr(32, expr_expr)
-        elif self.base == 'of_desc_str_t':
-            return self._gen_string_pack_expr(256, expr_expr)
-        else:
-            return "'TODO pack %s'" % self.base
+    'uint32_t': OFTypeData(
+        init='0',
+        pack='struct.pack("!L", %s)',
+        unpack='%s.read("!L")[0]'),
 
-    def _gen_string_pack_expr(self, length, expr_expr):
-        return 'struct.pack("!%ds", %s)' % (length, expr_expr)
+    'uint64_t': OFTypeData(
+        init='0',
+        pack='struct.pack("!Q", %s)',
+        unpack='%s.read("!Q")[0]'),
 
-    def gen_unpack_expr(self, buf_expr, offset_expr):
-        pack_fmt = self._pack_fmt()
-        if pack_fmt and not self.is_array:
-            return "struct.unpack_from('!%s', %s, %s)[0]" % (pack_fmt, buf_expr, offset_expr)
-        elif pack_fmt and self.is_array:
-            return "list(struct.unpack_from('!%d%s', %s, %s))" % (self.array_length, pack_fmt, buf_expr, offset_expr)
-        elif self.base == 'of_octets_t':
-            return "%s[%s:]" % (buf_expr, offset_expr)
-        elif self.base == 'of_mac_addr_t':
-            return "list(struct.unpack_from('!6B', %s, %s))" % (buf_expr, offset_expr)
-        elif self.base == 'of_match_t':
-            return 'common.match.unpack(buffer(%s, %s))' % (buf_expr, offset_expr)
-        elif self.base == 'of_port_desc_t':
-            return 'common.port_desc.unpack(buffer(%s, %s))' % (buf_expr, offset_expr)
-        elif self.base == 'of_list_action_t':
-            return 'action.unpack_list(buffer(%s, %s))' % (buf_expr, offset_expr)
-        elif self.base == 'of_list_flow_stats_entry_t':
-            return 'common.unpack_list_flow_stats_entry(buffer(%s, %s))' % (buf_expr, offset_expr)
-        elif self.base == 'of_list_queue_prop_t':
-            return 'common.unpack_list_queue_prop(buffer(%s, %s))' % (buf_expr, offset_expr)
-        elif self.base == 'of_list_packet_queue_t':
-            return 'common.unpack_list_packet_queue(buffer(%s, %s))' % (buf_expr, offset_expr)
-        elif self.base == 'of_port_name_t':
-            return self._gen_string_unpack_expr(16, buf_expr, offset_expr)
-        elif self.base == 'of_table_name_t' or self.base == 'of_serial_num_t':
-            return self._gen_string_unpack_expr(32, buf_expr, offset_expr)
-        elif self.base == 'of_desc_str_t':
-            return self._gen_string_unpack_expr(256, buf_expr, offset_expr)
-        elif utils.class_is_list(self.base):
-            element_cls = utils.list_to_entry_type(self.base)[:-2]
-            if ((element_cls, self.version) in of_g.is_fixed_length):
-                klass_name = self.base[8:-2]
-                element_size, = of_g.base_length[(element_cls, self.version)],
-                return 'util.unpack_array(common.%s.unpack, %d, buffer(%s, %s))' % (klass_name, element_size, buf_expr, offset_expr)
-            else:
-                return "None # TODO unpack list %s" % self.base
-        else:
-            return "None # TODO unpack %s" % self.base
+    'of_port_no_t': OFTypeData(
+        init='0',
+        pack='util.pack_port_no(%s)',
+        unpack='util.unpack_port_no(%s)'),
 
-    def _gen_string_unpack_expr(self, length, buf_expr, offset_expr):
-        return 'str(buffer(%s, %s, %d)).rstrip("\\x00")' % (buf_expr, offset_expr, length)
+    'of_fm_cmd_t': OFTypeData(
+        init='0',
+        pack='util.pack_fm_cmd(%s)',
+        unpack='util.unpack_fm_cmd(%s)'),
 
-    def _pack_fmt(self):
-        if self.base == "char":
-            return "B"
-        if self.base == "uint8_t":
-            return "B"
-        if self.base == "uint16_t":
-            return "H"
-        if self.base == "uint32_t":
-            return "L"
-        if self.base == "uint64_t":
-            return "Q"
-        if self.base == "of_port_no_t":
-            if self.version == of_g.VERSION_1_0:
-                return "H"
-            else:
-                return "L"
-        if self.base == "of_fm_cmd_t":
-            if self.version == of_g.VERSION_1_0:
-                return "H"
-            else:
-                return "B"
-        if self.base in ["of_wc_bmap_t", "of_match_bmap_t"]:
-            if self.version in [of_g.VERSION_1_0, of_g.VERSION_1_1]:
-                return "L"
-            else:
-                return "Q"
-        return None
+    'of_wc_bmap_t': OFTypeData(
+        init='util.init_wc_bmap()',
+        pack='util.pack_wc_bmap(%s)',
+        unpack='util.unpack_wc_bmap(%s)'),
 
-class TestOFType(unittest.TestCase):
-    def test_init(self):
-        from oftype import OFType
-        self.assertEquals("None", OFType("of_list_action_t", 1).gen_init_expr())
-        self.assertEquals("[0,0,0]", OFType("uint32_t[3]", 1).gen_init_expr())
+    'of_match_bmap_t': OFTypeData(
+        init='util.init_match_bmap()',
+        pack='util.pack_match_bmap(%s)',
+        unpack='util.unpack_match_bmap(%s)'),
 
-    def test_pack(self):
-        self.assertEquals('struct.pack("!16s", "foo")', OFType("of_port_name_t", 1).gen_pack_expr('"foo"'))
+    'of_ipv4_t': OFTypeData(
+        init='0',
+        pack='struct.pack("!L", %s)',
+        unpack='%s.read("!L")[0]'),
 
-    def test_unpack(self):
-        self.assertEquals('str(buffer(buf, 8, 16)).rstrip("\\x00")', OFType("of_port_name_t", 1).gen_unpack_expr('buf', 8))
+    'of_ipv6_t': OFTypeData(
+        init="'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'",
+        pack='struct.pack("!16s", %s)',
+        unpack="%s.read('!16s')[0]"),
 
-if __name__ == '__main__':
-    unittest.main()
+    'of_mac_addr_t': OFTypeData(
+        init='[0,0,0,0,0,0]',
+        pack='struct.pack("!6B", *%s)',
+        unpack="list(%s.read('!6B'))"),
+
+    'of_octets_t': OFTypeData(
+        init="''",
+        pack='%s',
+        unpack='str(%s.read_all())'),
+
+    'of_bitmap_128_t': OFTypeData(
+        init='set()',
+        pack='util.pack_bitmap_128(%s)',
+        unpack="util.unpack_bitmap_128(%s)"),
+
+    'of_oxm_t': OFTypeData(
+        init='None',
+        pack='%s.pack()',
+        unpack='oxm.oxm.unpack(%s)'),
+
+    'of_checksum_128_t': OFTypeData(
+        init='0',
+        pack='util.pack_checksum_128(%s)',
+        unpack="util.unpack_checksum_128(%s)"),
+}
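+# Illustrative expansion of the 'uint16_t' entry above:
+#   pack   % 'self.flags' -> 'struct.pack("!H", self.flags)'
+#   unpack % 'reader'     -> 'reader.read("!H")[0]'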
+
+## Fixed length strings
+
+# Map from class name to length
+fixed_length_strings = {
+    'of_port_name_t': 16,
+    'of_table_name_t': 32,
+    'of_serial_num_t': 32,
+    'of_desc_str_t': 256,
+}
+
+for (cls, length) in fixed_length_strings.items():
+    type_data_map[cls] = OFTypeData(
+        init='""',
+        pack='struct.pack("!%ds", %%s)' % length,
+        unpack='%%s.read("!%ds")[0].rstrip("\\x00")' % length)
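+# Illustrative result for 'of_port_name_t' (length 16):
+#   pack   = 'struct.pack("!16s", %s)'
+#   unpack = '%s.read("!16s")[0].rstrip("\x00")'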
+
+## Embedded structs
+
+# Map from class name to Python class name
+embedded_structs = {
+    'of_match_t': 'common.match',
+    'of_port_desc_t': 'common.port_desc',
+    'of_meter_features_t': 'common.meter_features',
+    'of_bsn_vport_q_in_q_t': 'common.bsn_vport_q_in_q',
+}
+
+for (cls, pyclass) in embedded_structs.items():
+    type_data_map[cls] = OFTypeData(
+        init='%s()' % pyclass,
+        pack='%s.pack()',
+        unpack='%s.unpack(%%s)' % pyclass)
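+# Illustrative result for 'of_match_t': pack is '%s.pack()', unpack is
+# 'common.match.unpack(%s)'.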
+
+## Public interface
+
+def lookup_type_data(oftype, version):
+    return type_data_map.get(loxi_utils.lookup_ir_wiretype(oftype, version))
+
+# Return an initializer expression for the given oftype
+def gen_init_expr(oftype, version):
+    type_data = lookup_type_data(oftype, version)
+    if type_data and type_data.init:
+        return type_data.init
+    elif oftype_is_list(oftype):
+        return "[]"
+    else:
+        return "loxi.unimplemented('init %s')" % oftype
+
+# Return a pack expression for the given oftype
+#
+# 'value_expr' is a string of Python code which will evaluate to
+# the value to be packed.
+def gen_pack_expr(oftype, value_expr, version):
+    type_data = lookup_type_data(oftype, version)
+    if type_data and type_data.pack:
+        return type_data.pack % value_expr
+    elif oftype_is_list(oftype):
+        return "loxi.generic_util.pack_list(%s)" % value_expr
+    else:
+        return "loxi.unimplemented('pack %s')" % oftype
+
+# Return an unpack expression for the given oftype
+#
+# 'reader_expr' is a string of Python code which will evaluate to
+# the OFReader instance used for deserialization.
+def gen_unpack_expr(oftype, reader_expr, version):
+    type_data = lookup_type_data(oftype, version)
+    if type_data and type_data.unpack:
+        return type_data.unpack % reader_expr
+    elif oftype_is_list(oftype):
+        ofproto = loxi_globals.ir[version]
+        ofclass = ofproto.class_by_name(oftype_list_elem(oftype))
+        module_name, class_name = py_gen.codegen.generate_pyname(ofclass)
+        return 'loxi.generic_util.unpack_list(%s, %s.%s.unpack)' % \
+            (reader_expr, module_name, class_name)
+    else:
+        return "loxi.unimplemented('unpack %s')" % oftype
+
+def oftype_is_list(oftype):
+    return (oftype.find("list(") == 0)
+
+# Converts "list(of_flow_stats_entry_t)" to "of_flow_stats_entry"
+def oftype_list_elem(oftype):
+    assert oftype.find("list(") == 0
+    return oftype[5:-3]
diff --git a/py_gen/sphinx/conf.py b/py_gen/sphinx/conf.py
new file mode 100644
index 0000000..ef007c1
--- /dev/null
+++ b/py_gen/sphinx/conf.py
@@ -0,0 +1,287 @@
+# -*- coding: utf-8 -*-
+#
+# PyLoxi documentation build configuration file, created by
+# sphinx-quickstart on Mon Jun  3 14:14:34 2013.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../../pyloxi'))
+
+import loxi
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'PyLoxi'
+copyright = u'2013, Big Switch Networks'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = ''
+# The full version, including alpha/beta/rc tags.
+release = ''
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'PyLoxidoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'PyLoxi.tex', u'PyLoxi Documentation',
+   u'Author', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'pyloxi', u'PyLoxi Documentation',
+     [u'Author'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'PyLoxi', u'PyLoxi Documentation',
+   u'Author', 'PyLoxi', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+
+# -- Options for Epub output ---------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = u'PyLoxi'
+epub_author = u'Author'
+epub_publisher = u'Author'
+epub_copyright = u'2013, Author'
+
+# The language of the text. It defaults to the language option
+# or en if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# A tuple containing the cover image and cover page html template filenames.
+#epub_cover = ()
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files that should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
diff --git a/py_gen/sphinx/index.rst b/py_gen/sphinx/index.rst
new file mode 100644
index 0000000..d5e356b
--- /dev/null
+++ b/py_gen/sphinx/index.rst
@@ -0,0 +1,22 @@
+.. PyLoxi documentation master file, created by
+   sphinx-quickstart on Mon Jun  3 14:14:34 2013.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to PyLoxi's documentation!
+==================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 4
+
+   loxi
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/py_gen/templates/_pack_packet_out.py b/py_gen/templates/_common_extra.py
similarity index 67%
copy from py_gen/templates/_pack_packet_out.py
copy to py_gen/templates/_common_extra.py
index ad8b827..8639dc4 100644
--- a/py_gen/templates/_pack_packet_out.py
+++ b/py_gen/templates/_common_extra.py
@@ -25,15 +25,14 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-        packed.append(struct.pack("!B", self.version))
-        packed.append(struct.pack("!B", self.type))
-        packed.append(struct.pack("!H", 0)) # placeholder for length at index 3
-        packed.append(struct.pack("!L", self.xid))
-        packed.append(struct.pack("!L", self.buffer_id))
-        packed.append(struct.pack("!H", self.in_port))
-        packed_actions = "".join([x.pack() for x in self.actions])
-        packed.append(struct.pack("!H", len(packed_actions)))
-        packed.append(packed_actions)
-        packed.append(self.data)
-        length = sum([len(x) for x in packed])
-        packed[2] = struct.pack("!H", length)
+:: from loxi_globals import OFVersions
+:: if version == OFVersions.VERSION_1_0:
+match = match_v1
+:: elif version == OFVersions.VERSION_1_1:
+match = match_v2
+:: elif version == OFVersions.VERSION_1_2:
+match = match_v3
+:: elif version == OFVersions.VERSION_1_3:
+:: # HACK
+match = match_v3
+:: #endif
diff --git a/py_gen/templates/_copyright.py b/py_gen/templates/_copyright.py
index 24dc1b6..6084abe 100644
--- a/py_gen/templates/_copyright.py
+++ b/py_gen/templates/_copyright.py
@@ -28,3 +28,4 @@
 # Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
 # Copyright (c) 2011, 2012 Open Networking Foundation
 # Copyright (c) 2012, 2013 Big Switch Networks, Inc.
+# See the file LICENSE.pyloxi which should have been included in the source distribution
diff --git a/py_gen/templates/_pack_packet_out.py b/py_gen/templates/_message_extra.py
similarity index 67%
rename from py_gen/templates/_pack_packet_out.py
rename to py_gen/templates/_message_extra.py
index ad8b827..9fc2f5e 100644
--- a/py_gen/templates/_pack_packet_out.py
+++ b/py_gen/templates/_message_extra.py
@@ -25,15 +25,15 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-        packed.append(struct.pack("!B", self.version))
-        packed.append(struct.pack("!B", self.type))
-        packed.append(struct.pack("!H", 0)) # placeholder for length at index 3
-        packed.append(struct.pack("!L", self.xid))
-        packed.append(struct.pack("!L", self.buffer_id))
-        packed.append(struct.pack("!H", self.in_port))
-        packed_actions = "".join([x.pack() for x in self.actions])
-        packed.append(struct.pack("!H", len(packed_actions)))
-        packed.append(packed_actions)
-        packed.append(self.data)
-        length = sum([len(x) for x in packed])
-        packed[2] = struct.pack("!H", length)
+def parse_header(buf):
+    if len(buf) < 8:
+        raise loxi.ProtocolError("too short to be an OpenFlow message")
+    return struct.unpack_from("!BBHL", buf)
+
+def parse_message(buf):
+    msg_ver, msg_type, msg_len, msg_xid = parse_header(buf)
+    if msg_ver != const.OFP_VERSION and msg_type != const.OFPT_HELLO:
+        raise loxi.ProtocolError("wrong OpenFlow version (expected %d, got %d)" % (const.OFP_VERSION, msg_ver))
+    if len(buf) != msg_len:
+        raise loxi.ProtocolError("incorrect message size")
+    return message.unpack(loxi.generic_util.OFReader(buf))
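A minimal round-trip sketch of the parse_header/parse_message helpers defined above, assuming the generated loxi package is importable:

# Sketch only: pack a hello and feed it back through parse_message.
import loxi.of10 as ofp

msg = ofp.message.hello(xid=0x12345678)
buf = msg.pack()                                   # length field is backfilled by the pack template
assert ofp.message.parse_header(buf)[0] == 1       # (version, type, length, xid)
assert ofp.message.parse_message(buf) == msg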
diff --git a/py_gen/templates/_ofclass.py b/py_gen/templates/_ofclass.py
index ff8b21b..dfcd1fc 100644
--- a/py_gen/templates/_ofclass.py
+++ b/py_gen/templates/_ofclass.py
@@ -1,15 +1,34 @@
-:: nonskip_members = [m for m in ofclass.members if not m.skip]
-class ${ofclass.pyname}(${superclass}):
-:: for m in ofclass.type_members:
+:: superclass_pyname = ofclass.superclass.pyname if ofclass.superclass else "loxi.OFObject"
+:: from loxi_ir import *
+:: import py_gen.oftype
+:: import py_gen.util as util
+:: type_members = [m for m in ofclass.members if type(m) == OFTypeMember]
+:: normal_members = [m for m in ofclass.members if type(m) == OFDataMember or
+::                                                 type(m) == OFDiscriminatorMember]
+:: if ofclass.virtual:
+:: discriminator_fmts = { 1: "B", 2: "!H", 4: "!L" }
+:: discriminator_fmt = discriminator_fmts[ofclass.discriminator.length]
+:: #endif
+class ${ofclass.pyname}(${superclass_pyname}):
+:: if ofclass.virtual:
+    subtypes = {}
+
+:: #endif
+:: for m in type_members:
     ${m.name} = ${m.value}
 :: #endfor
 
-    def __init__(${', '.join(['self'] + ["%s=None" % m.name for m in nonskip_members])}):
-:: for m in nonskip_members:
+    def __init__(${', '.join(['self'] + ["%s=None" % m.name for m in normal_members])}):
+:: for m in normal_members:
         if ${m.name} != None:
             self.${m.name} = ${m.name}
         else:
-            self.${m.name} = ${m.oftype.gen_init_expr()}
+:: if m.name == 'xid':
+:: # HACK for message xid
+            self.${m.name} = None
+:: else:
+            self.${m.name} = ${py_gen.oftype.gen_init_expr(m.oftype, version=version)}
+:: #endif
 :: #endfor
         return
 
@@ -19,24 +38,31 @@
         return ''.join(packed)
 
     @staticmethod
-    def unpack(buf):
+    def unpack(reader):
+:: if ofclass.virtual:
+        subtype, = reader.peek(${repr(discriminator_fmt)}, ${ofclass.discriminator.offset})
+        subclass = ${ofclass.pyname}.subtypes.get(subtype)
+        if subclass:
+            return subclass.unpack(reader)
+
+:: #endif
         obj = ${ofclass.pyname}()
 :: include("_unpack.py", ofclass=ofclass)
         return obj
 
     def __eq__(self, other):
         if type(self) != type(other): return False
-:: for m in nonskip_members:
+:: for m in normal_members:
         if self.${m.name} != other.${m.name}: return False
 :: #endfor
         return True
 
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def show(self):
-        import loxi.pp
-        return loxi.pp.pp(self)
-
     def pretty_print(self, q):
 :: include('_pretty_print.py', ofclass=ofclass)
+
+:: # Register with our superclass
+:: if ofclass.superclass:
+:: type_field_name = ofclass.superclass.discriminator.name
+:: type_value = ofclass.member_by_name(type_field_name).value
+${superclass_pyname}.subtypes[${type_value}] = ${ofclass.pyname}
+:: #endif
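The registration block at the end of the template ties the generated class hierarchy together at import time: every concrete subclass stores itself in its superclass's subtypes dict under its discriminator value, and the virtual superclass's unpack peeks that value and dispatches. A hand-written sketch of the same pattern (plain Python, not generated output):

# Sketch of the subtype-registration/dispatch pattern emitted by _ofclass.py.
import struct

class base(object):
    subtypes = {}                               # discriminator value -> concrete subclass

    @staticmethod
    def unpack(buf):
        subtype, = struct.unpack_from("!H", buf)    # peek at the discriminator
        subclass = base.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(buf)
        return base()                           # no subclass registered: fall back to the base

class concrete(base):
    type = 1

    @staticmethod
    def unpack(buf):
        return concrete()

base.subtypes[concrete.type] = concrete         # mirrors the registration the template emits

assert isinstance(base.unpack(struct.pack("!H", 1)), concrete)
assert type(base.unpack(struct.pack("!H", 99))) is base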
diff --git a/py_gen/templates/_pack.py b/py_gen/templates/_pack.py
index 0d50a38..0525ef2 100644
--- a/py_gen/templates/_pack.py
+++ b/py_gen/templates/_pack.py
@@ -26,22 +26,42 @@
 :: # under the EPL.
 ::
 :: # TODO coalesce format strings
-:: all_members = ofclass.members[:]
-:: if ofclass.length_member: all_members.append(ofclass.length_member)
-:: all_members.extend(ofclass.type_members)
-:: all_members.sort(key=lambda x: x.offset)
+:: from loxi_ir import *
+:: from py_gen.oftype import gen_pack_expr
+:: length_member = None
 :: length_member_index = None
+:: field_length_members = {}
+:: field_length_indexes = {}
 :: index = 0
-:: for m in all_members:
-::     if m == ofclass.length_member:
+:: for m in ofclass.members:
+::     if type(m) == OFLengthMember:
+::         length_member = m
 ::         length_member_index = index
-        packed.append(${m.oftype.gen_pack_expr('0')}) # placeholder for ${m.name} at index ${length_member_index}
+        packed.append(${gen_pack_expr(m.oftype, '0', version=version)}) # placeholder for ${m.name} at index ${index}
+::     elif type(m) == OFFieldLengthMember:
+::         field_length_members[m.field_name] = m
+::         field_length_indexes[m.field_name] = index
+        packed.append(${gen_pack_expr(m.oftype, '0', version=version)}) # placeholder for ${m.name} at index ${index}
+::     elif type(m) == OFPadMember:
+        packed.append('\x00' * ${m.length})
 ::     else:
-        packed.append(${m.oftype.gen_pack_expr('self.' + m.name)})
+        packed.append(${gen_pack_expr(m.oftype, 'self.' + m.name, version=version)})
+::         if m.name in field_length_members:
+::             field_length_member = field_length_members[m.name]
+::             field_length_index = field_length_indexes[m.name]
+        packed[${field_length_index}] = ${gen_pack_expr(field_length_member.oftype, 'len(packed[-1])', version=version)}
+::         #endif
 ::     #endif
 ::     index += 1
 :: #endfor
 :: if length_member_index != None:
         length = sum([len(x) for x in packed])
-        packed[${length_member_index}] = ${ofclass.length_member.oftype.gen_pack_expr('length')}
+:: if ofclass.has_internal_alignment:
+        packed.append(loxi.generic_util.pad_to(8, length))
+        length += len(packed[-1])
+:: #endif
+        packed[${length_member_index}] = ${gen_pack_expr(length_member.oftype, 'length', version=version)}
+:: #endif
+:: if ofclass.has_external_alignment:
+        packed.append(loxi.generic_util.pad_to(8, length))
 :: #endif
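The generated pack() methods all follow the same shape: append each field's packed bytes to a list, leave a zeroed placeholder where the length (or a field-length) member sits, then backfill that slot once the total is known. A hand-written sketch of the pattern (not generated output):

# Sketch of the placeholder/backfill pattern produced by _pack.py.
import struct

def pack_example(version, msg_type, xid, payload):
    packed = []
    packed.append(struct.pack("!B", version))
    packed.append(struct.pack("!B", msg_type))
    packed.append(struct.pack("!H", 0))          # placeholder for length at index 2
    packed.append(struct.pack("!L", xid))
    packed.append(payload)
    length = sum([len(x) for x in packed])
    packed[2] = struct.pack("!H", length)        # backfill once the total is known
    return ''.join(packed)

assert len(pack_example(1, 0, 0, 'abc')) == 11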
diff --git a/py_gen/templates/_pretty_print.py b/py_gen/templates/_pretty_print.py
index 604cd94..c8c7770 100644
--- a/py_gen/templates/_pretty_print.py
+++ b/py_gen/templates/_pretty_print.py
@@ -24,14 +24,17 @@
 :: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
+:: from loxi_globals import OFVersions
+:: import loxi_utils.loxi_utils as loxi_utils
 ::
         q.text("${ofclass.pyname} {")
         with q.group():
             with q.indent(2):
                 q.breakable()
+:: from loxi_ir import *
+:: normal_members = [m for m in ofclass.members if type(m) == OFDataMember]
 :: first = True
-:: for m in ofclass.members:
-:: if m.name == 'actions_len': continue
+:: for m in normal_members:
 :: if not first:
                 q.text(","); q.breakable()
 :: else:
@@ -43,15 +46,15 @@
                     q.text("%#x" % self.${m.name})
                 else:
                     q.text('None')
-:: elif m.oftype.base == 'of_mac_addr_t':
+:: elif m.oftype == 'of_mac_addr_t':
                 q.text(util.pretty_mac(self.${m.name}))
-:: elif m.oftype.base == 'uint32_t' and m.name.startswith("ipv4"):
+:: elif m.oftype == 'of_ipv4_t':
                 q.text(util.pretty_ipv4(self.${m.name}))
-:: elif m.oftype.base == 'of_wc_bmap_t':
+:: elif m.oftype == 'of_wc_bmap_t' and version in OFVersions.from_strings("1.0", "1.1"):
                 q.text(util.pretty_wildcards(self.${m.name}))
-:: elif m.oftype.base == 'of_port_no_t':
+:: elif m.oftype == 'of_port_no_t':
                 q.text(util.pretty_port(self.${m.name}))
-:: elif m.oftype.base.startswith("uint") and not m.oftype.is_array:
+:: elif loxi_utils.lookup_ir_wiretype(m.oftype, version=version).startswith("uint"):
                 q.text("%#x" % self.${m.name})
 :: else:
                 q.pp(self.${m.name})
diff --git a/py_gen/templates/_unpack.py b/py_gen/templates/_unpack.py
index 173ebb5..cce9de3 100644
--- a/py_gen/templates/_unpack.py
+++ b/py_gen/templates/_unpack.py
@@ -26,24 +26,31 @@
 :: # under the EPL.
 ::
 :: # TODO coalesce format strings
-:: all_members = ofclass.members[:]
-:: if ofclass.length_member: all_members.append(ofclass.length_member)
-:: all_members.extend(ofclass.type_members)
-:: all_members.sort(key=lambda x: x.offset)
-:: for m in all_members:
-::     unpack_expr = m.oftype.gen_unpack_expr('buf', m.offset)
-::     if m == ofclass.length_member:
-        _length = ${unpack_expr}
-        assert(_length == len(buf))
-:: if ofclass.is_fixed_length:
-        if _length != ${ofclass.min_length}: raise loxi.ProtocolError("${ofclass.pyname} length is %d, should be ${ofclass.min_length}" % _length)
-:: else:
-        if _length < ${ofclass.min_length}: raise loxi.ProtocolError("${ofclass.pyname} length is %d, should be at least ${ofclass.min_length}" % _length)
-:: #endif
-::     elif m in ofclass.type_members:
-        ${m.name} = ${unpack_expr}
-        assert(${m.name} == ${m.value})
-::     else:
-        obj.${m.name} = ${unpack_expr}
+:: from loxi_ir import *
+:: from py_gen.oftype import gen_unpack_expr
+:: field_length_members = {}
+:: for m in ofclass.members:
+::     if type(m) == OFPadMember:
+        reader.skip(${m.length})
+::     elif type(m) == OFLengthMember:
+        _${m.name} = ${gen_unpack_expr(m.oftype, 'reader', version=version)}
+        orig_reader = reader
+        reader = orig_reader.slice(_${m.name} - (${m.offset} + ${m.length}))
+::     elif type(m) == OFFieldLengthMember:
+::         field_length_members[m.field_name] = m
+        _${m.name} = ${gen_unpack_expr(m.oftype, 'reader', version=version)}
+::     elif type(m) == OFTypeMember:
+        _${m.name} = ${gen_unpack_expr(m.oftype, 'reader', version=version)}
+        assert(_${m.name} == ${m.value})
+::     elif type(m) == OFDataMember or type(m) == OFDiscriminatorMember:
+::         if m.name in field_length_members:
+::             reader_expr = 'reader.slice(_%s)' % field_length_members[m.name].name
+::         else:
+::             reader_expr = 'reader'
+::         #endif
+        obj.${m.name} = ${gen_unpack_expr(m.oftype, reader_expr, version=version)}
 ::     #endif
 :: #endfor
+:: if ofclass.has_external_alignment:
+        orig_reader.skip_align()
+:: #endif
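On the unpack side, the generated code reads fields sequentially from an OFReader and, once the length member is known, continues inside a length-bounded slice so trailing data cannot overrun the object. A minimal hand-written sketch of that flow, assuming the OFReader added later in this change:

# Sketch only: reader-based unpack with a length-bounded slice, as in _unpack.py.
from loxi.generic_util import OFReader

def unpack_example(buf):
    reader = OFReader(buf)
    version, = reader.read("!B")
    msg_type, = reader.read("!B")
    _length, = reader.read("!H")
    reader = reader.slice(_length - 4)     # bound the remainder by the length field
    xid, = reader.read("!L")
    data = reader.read_all()
    return (version, msg_type, xid, data)

assert unpack_example("\x01\x00\x00\x0b\x12\x34\x56\x78abc") == (1, 0, 0x12345678, "abc")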
diff --git a/py_gen/templates/action.py b/py_gen/templates/action.py
deleted file mode 100644
index 089cc53..0000000
--- a/py_gen/templates/action.py
+++ /dev/null
@@ -1,99 +0,0 @@
-:: # Copyright 2013, Big Switch Networks, Inc.
-:: #
-:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
-:: # the following special exception:
-:: #
-:: # LOXI Exception
-:: #
-:: # As a special exception to the terms of the EPL, you may distribute libraries
-:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
-:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
-:: # from the LoxiGen Libraries and the notice provided below is (i) included in
-:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
-:: # documentation for the LoxiGen Libraries, if distributed in binary form.
-:: #
-:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
-:: #
-:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
-:: # a copy of the EPL at:
-:: #
-:: # http://www.eclipse.org/legal/epl-v10.html
-:: #
-:: # Unless required by applicable law or agreed to in writing, software
-:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-:: # EPL for the specific language governing permissions and limitations
-:: # under the EPL.
-::
-:: import itertools
-:: include('_copyright.py')
-
-:: include('_autogen.py')
-
-import struct
-import const
-import util
-import loxi
-
-def unpack_list(buf):
-    if len(buf) % 8 != 0: raise loxi.ProtocolError("action list length not a multiple of 8")
-    def deserializer(buf):
-        type, length = struct.unpack_from("!HH", buf)
-        if length % 8 != 0: raise loxi.ProtocolError("action length not a multiple of 8")
-        parser = parsers.get(type)
-        if not parser: raise loxi.ProtocolError("unknown action type %d" % type)
-        return parser(buf)
-    return util.unpack_list(deserializer, "!2xH", buf)
-
-class Action(object):
-    type = None # override in subclass
-    pass
-
-:: for ofclass in ofclasses:
-:: include('_ofclass.py', ofclass=ofclass, superclass="Action")
-
-:: #endfor
-
-def parse_vendor(buf):
-    if len(buf) < 16:
-        raise loxi.ProtocolError("experimenter action too short")
-
-    experimenter, = struct.unpack_from("!L", buf, 4)
-    if experimenter == 0x005c16c7: # Big Switch Networks
-        subtype, = struct.unpack_from("!L", buf, 8)
-    elif experimenter == 0x00002320: # Nicira
-        subtype, = struct.unpack_from("!H", buf, 8)
-    else:
-        raise loxi.ProtocolError("unexpected experimenter id %#x" % experimenter)
-
-    if subtype in experimenter_parsers[experimenter]:
-        return experimenter_parsers[experimenter][subtype](buf)
-    else:
-        raise loxi.ProtocolError("unexpected BSN experimenter subtype %#x" % subtype)
-
-parsers = {
-:: sort_key = lambda x: x.type_members[0].value
-:: msgtype_groups = itertools.groupby(sorted(ofclasses, key=sort_key), sort_key)
-:: for (k, v) in msgtype_groups:
-:: v = list(v)
-:: if len(v) == 1:
-    ${k} : ${v[0].pyname}.unpack,
-:: else:
-    ${k} : parse_${k[12:].lower()},
-:: #endif
-:: #endfor
-}
-
-:: experimenter_ofclasses = [x for x in ofclasses if x.type_members[0].value == 'const.OFPAT_VENDOR']
-:: sort_key = lambda x: x.type_members[1].value
-:: experimenter_ofclasses.sort(key=sort_key)
-:: grouped = itertools.groupby(experimenter_ofclasses, sort_key)
-experimenter_parsers = {
-:: for (experimenter, v) in grouped:
-    ${experimenter} : {
-:: for ofclass in v:
-        ${ofclass.type_members[2].value}: ${ofclass.pyname}.unpack,
-:: #endfor
-    },
-:: #endfor
-}
diff --git a/py_gen/templates/const.py b/py_gen/templates/const.py
index ef09585..d1c00f9 100644
--- a/py_gen/templates/const.py
+++ b/py_gen/templates/const.py
@@ -24,6 +24,7 @@
 :: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
+:: from loxi_globals import OFVersions
 ::
 :: blacklisted_map_groups = ['macro_definitions']
 :: blacklisted_map_idents = ['OFPFW_NW_DST_BITS', 'OFPFW_NW_SRC_BITS',
@@ -34,13 +35,12 @@
 
 :: include('_autogen.py')
 
-OFP_VERSION = ${version}
+OFP_VERSION = ${version.wire_version}
 
-:: for (group, idents) in sorted(groups.items()):
-::    idents.sort(key=lambda (ident, value): value)
-# Identifiers from group ${group}
-::    for (ident, value) in idents:
-::        if version == 1 and ident.startswith('OFPP_'):
+:: for enum in sorted(enums, key=lambda enum: enum.name):
+# Identifiers from group ${enum.name}
+::    for (ident, value) in enum.values:
+::        if version == OFVersions.VERSION_1_0 and ident.startswith('OFPP_'):
 ::        # HACK loxi converts these to 32-bit
 ${ident} = ${"%#x" % (value & 0xffff)}
 ::        else:
@@ -48,12 +48,12 @@
 ::        #endif
 ::    #endfor
 
-::    if group not in blacklisted_map_groups:
-${group}_map = {
-::        for (ident, value) in idents:
+::    if enum.name not in blacklisted_map_groups:
+${enum.name}_map = {
+::        for (ident, value) in enum.values:
 ::            if ident in blacklisted_map_idents:
 ::                pass
-::            elif version == 1 and ident.startswith('OFPP_'):
+::            elif version == OFVersions.VERSION_1_0 and ident.startswith('OFPP_'):
 ::                # HACK loxi converts these to 32-bit
     ${"%#x" % (value & 0xffff)}: ${repr(ident)},
 ::        else:
diff --git a/py_gen/templates/generic_util.py b/py_gen/templates/generic_util.py
new file mode 100644
index 0000000..039d82a
--- /dev/null
+++ b/py_gen/templates/generic_util.py
@@ -0,0 +1,122 @@
+:: # Copyright 2013, Big Switch Networks, Inc.
+:: #
+:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+:: # the following special exception:
+:: #
+:: # LOXI Exception
+:: #
+:: # As a special exception to the terms of the EPL, you may distribute libraries
+:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+:: #
+:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+:: #
+:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+:: # a copy of the EPL at:
+:: #
+:: # http://www.eclipse.org/legal/epl-v10.html
+:: #
+:: # Unless required by applicable law or agreed to in writing, software
+:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+:: # EPL for the specific language governing permissions and limitations
+:: # under the EPL.
+::
+:: include('_copyright.py')
+"""
+Utility functions independent of the protocol version
+"""
+
+:: include('_autogen.py')
+
+import loxi
+import struct
+
+def pack_list(values):
+    return "".join([x.pack() for x in values])
+
+def unpack_list(reader, deserializer):
+    """
+    The deserializer function should take an OFReader and return the new object.
+    """
+    entries = []
+    while not reader.is_empty():
+        entries.append(deserializer(reader))
+    return entries
+
+def pad_to(alignment, length):
+    """
+    Return a string of zero bytes that will pad a string of length 'length' to
+    a multiple of 'alignment'.
+    """
+    return "\x00" * ((length + alignment - 1)/alignment*alignment - length)
+
+class OFReader(object):
+    """
+    Cursor over a read-only buffer
+
+    OpenFlow messages are best thought of as a sequence of elements of
+    variable size, rather than a C-style struct with fixed offsets and
+    known field lengths. This class supports efficiently reading
+    fields sequentially and is intended to be used recursively by the
+    parsers of child objects which will implicitly update the offset.
+
+    buf: buffer object
+    start: initial position in the buffer
+    length: number of bytes after start
+    offset: distance from start
+    """
+    def __init__(self, buf, start=0, length=None):
+        self.buf = buf
+        self.start = start
+        if length is None:
+            self.length = len(buf) - start
+        else:
+            self.length = length
+        self.offset = 0
+
+    def read(self, fmt):
+        st = struct.Struct(fmt)
+        if self.offset + st.size > self.length:
+            raise loxi.ProtocolError("Buffer too short")
+        result = st.unpack_from(self.buf, self.start+self.offset)
+        self.offset += st.size
+        return result
+
+    def read_all(self):
+        s = self.buf[(self.start+self.offset):(self.start+self.length)]
+        assert(len(s) == self.length - self.offset)
+        self.offset = self.length
+        return s
+
+    def peek(self, fmt, offset=0):
+        st = struct.Struct(fmt)
+        if self.offset + offset + st.size > self.length:
+            raise loxi.ProtocolError("Buffer too short")
+        result = st.unpack_from(self.buf, self.start + self.offset + offset)
+        return result
+
+    def skip(self, length):
+        if self.offset + length > self.length:
+            raise loxi.ProtocolError("Buffer too short")
+        self.offset += length
+
+    def skip_align(self):
+        new_offset = ((self.start + self.offset + 7) / 8 * 8) - self.start
+        if new_offset > self.length:
+            raise loxi.ProtocolError("Buffer too short")
+        self.offset = new_offset
+
+    def is_empty(self):
+        return self.offset == self.length
+
+    # Used when parsing objects that have their own length fields
+    def slice(self, length):
+        if self.offset + length > self.length:
+            raise loxi.ProtocolError("Buffer too short")
+        reader = OFReader(self.buf, self.start + self.offset, length)
+        self.offset += length
+        return reader
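A quick worked example of pad_to and the OFReader cursor semantics above (sketch only; assumes the module is importable as loxi.generic_util):

# Sketch only: pad_to and OFReader behaviour.
import loxi.generic_util as gu

assert gu.pad_to(8, 11) == "\x00" * 5      # pads 11 bytes up to the next multiple of 8
assert gu.pad_to(8, 16) == ""              # already aligned: no padding needed

reader = gu.OFReader("\x00\x10\x12\x34\x56\x78")
assert reader.peek("!H") == (0x10,)        # peek does not advance the cursor
assert reader.read("!H") == (0x10,)
assert reader.slice(4).read_all() == "\x12\x34\x56\x78"
assert reader.is_empty()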
diff --git a/py_gen/templates/init.py b/py_gen/templates/init.py
index abc8d70..3b73baa 100644
--- a/py_gen/templates/init.py
+++ b/py_gen/templates/init.py
@@ -30,6 +30,16 @@
 :: include('_autogen.py')
 
 import action, common, const, message
+:: if version >= 2:
+import instruction
+:: #endif
+:: if version >= 3:
+import oxm
+:: #endif
+:: if version >= 4:
+import meter_band
+import bsn_tlv
+:: #endif
 from const import *
 from common import *
 from loxi import ProtocolError
diff --git a/py_gen/templates/message.py b/py_gen/templates/message.py
deleted file mode 100644
index 71a2871..0000000
--- a/py_gen/templates/message.py
+++ /dev/null
@@ -1,219 +0,0 @@
-:: # Copyright 2013, Big Switch Networks, Inc.
-:: #
-:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
-:: # the following special exception:
-:: #
-:: # LOXI Exception
-:: #
-:: # As a special exception to the terms of the EPL, you may distribute libraries
-:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
-:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
-:: # from the LoxiGen Libraries and the notice provided below is (i) included in
-:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
-:: # documentation for the LoxiGen Libraries, if distributed in binary form.
-:: #
-:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
-:: #
-:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
-:: # a copy of the EPL at:
-:: #
-:: # http://www.eclipse.org/legal/epl-v10.html
-:: #
-:: # Unless required by applicable law or agreed to in writing, software
-:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-:: # EPL for the specific language governing permissions and limitations
-:: # under the EPL.
-::
-:: import itertools
-:: include('_copyright.py')
-
-:: include('_autogen.py')
-
-import struct
-import loxi
-import const
-import common
-import action # for unpack_list
-import util
-
-class Message(object):
-    version = const.OFP_VERSION
-    type = None # override in subclass
-    xid = None
-
-:: for ofclass in ofclasses:
-:: nonskip_members = [m for m in ofclass.members if not m.skip]
-class ${ofclass.pyname}(Message):
-:: for m in ofclass.type_members:
-    ${m.name} = ${m.value}
-:: #endfor
-
-    def __init__(self, ${', '.join(["%s=None" % m.name for m in nonskip_members])}):
-        self.xid = xid
-:: for m in [x for x in nonskip_members if x.name != 'xid']:
-        if ${m.name} != None:
-            self.${m.name} = ${m.name}
-        else:
-            self.${m.name} = ${m.oftype.gen_init_expr()}
-:: #endfor
-
-    def pack(self):
-        packed = []
-:: if ofclass.name == 'of_packet_out':
-:: include('_pack_packet_out.py', ofclass=ofclass)
-:: else:
-:: include('_pack.py', ofclass=ofclass)
-:: #endif
-        return ''.join(packed)
-
-    @staticmethod
-    def unpack(buf):
-        if len(buf) < 8: raise loxi.ProtocolError("buffer too short to contain an OpenFlow message")
-        obj = ${ofclass.pyname}()
-:: if ofclass.name == 'of_packet_out':
-:: include('_unpack_packet_out.py', ofclass=ofclass)
-:: else:
-:: include('_unpack.py', ofclass=ofclass)
-:: #endif
-        return obj
-
-    def __eq__(self, other):
-        if type(self) != type(other): return False
-        if self.version != other.version: return False
-        if self.type != other.type: return False
-:: for m in nonskip_members:
-        if self.${m.name} != other.${m.name}: return False
-:: #endfor
-        return True
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __str__(self):
-        return self.show()
-
-    def show(self):
-        import loxi.pp
-        return loxi.pp.pp(self)
-
-    def pretty_print(self, q):
-:: include('_pretty_print.py', ofclass=ofclass)
-
-:: #endfor
-
-def parse_header(buf):
-    if len(buf) < 8:
-        raise loxi.ProtocolError("too short to be an OpenFlow message")
-    return struct.unpack_from("!BBHL", buf)
-
-def parse_message(buf):
-    msg_ver, msg_type, msg_len, msg_xid = parse_header(buf)
-    if msg_ver != const.OFP_VERSION and msg_type != ofp.OFPT_HELLO:
-        raise loxi.ProtocolError("wrong OpenFlow version")
-    if len(buf) != msg_len:
-        raise loxi.ProtocolError("incorrect message size")
-    if msg_type in parsers:
-        return parsers[msg_type](buf)
-    else:
-        raise loxi.ProtocolError("unexpected message type")
-
-:: # TODO fix for OF 1.1+
-def parse_flow_mod(buf):
-    if len(buf) < 56 + 2:
-        raise loxi.ProtocolError("message too short")
-    cmd, = struct.unpack_from("!H", buf, 56)
-    if cmd in flow_mod_parsers:
-        return flow_mod_parsers[cmd](buf)
-    else:
-        raise loxi.ProtocolError("unexpected flow mod cmd %u" % cmd)
-
-def parse_stats_reply(buf):
-    if len(buf) < 8 + 2:
-        raise loxi.ProtocolError("message too short")
-    stats_type, = struct.unpack_from("!H", buf, 8)
-    if stats_type in stats_reply_parsers:
-        return stats_reply_parsers[stats_type](buf)
-    else:
-        raise loxi.ProtocolError("unexpected stats type %u" % stats_type)
-
-def parse_stats_request(buf):
-    if len(buf) < 8 + 2:
-        raise loxi.ProtocolError("message too short")
-    stats_type, = struct.unpack_from("!H", buf, 8)
-    if stats_type in stats_request_parsers:
-        return stats_request_parsers[stats_type](buf)
-    else:
-        raise loxi.ProtocolError("unexpected stats type %u" % stats_type)
-
-def parse_vendor(buf):
-    if len(buf) < 16:
-        raise loxi.ProtocolError("experimenter message too short")
-
-    experimenter, = struct.unpack_from("!L", buf, 8)
-    if experimenter == 0x005c16c7: # Big Switch Networks
-        subtype, = struct.unpack_from("!L", buf, 12)
-    elif experimenter == 0x00002320: # Nicira
-        subtype, = struct.unpack_from("!L", buf, 12)
-    else:
-        raise loxi.ProtocolError("unexpected experimenter id %#x" % experimenter)
-
-    if subtype in experimenter_parsers[experimenter]:
-        return experimenter_parsers[experimenter][subtype](buf)
-    else:
-        raise loxi.ProtocolError("unexpected experimenter %#x subtype %#x" % (experimenter, subtype))
-
-parsers = {
-:: sort_key = lambda x: x.type_members[1].value
-:: msgtype_groups = itertools.groupby(sorted(ofclasses, key=sort_key), sort_key)
-:: for (k, v) in msgtype_groups:
-:: v = list(v)
-:: if len(v) == 1:
-    ${k} : ${v[0].pyname}.unpack,
-:: else:
-    ${k} : parse_${k[11:].lower()},
-:: #endif
-:: #endfor
-}
-
-flow_mod_parsers = {
-    const.OFPFC_ADD : flow_add.unpack,
-    const.OFPFC_MODIFY : flow_modify.unpack,
-    const.OFPFC_MODIFY_STRICT : flow_modify_strict.unpack,
-    const.OFPFC_DELETE : flow_delete.unpack,
-    const.OFPFC_DELETE_STRICT : flow_delete_strict.unpack,
-}
-
-stats_reply_parsers = {
-    const.OFPST_DESC : desc_stats_reply.unpack,
-    const.OFPST_FLOW : flow_stats_reply.unpack,
-    const.OFPST_AGGREGATE : aggregate_stats_reply.unpack,
-    const.OFPST_TABLE : table_stats_reply.unpack,
-    const.OFPST_PORT : port_stats_reply.unpack,
-    const.OFPST_QUEUE : queue_stats_reply.unpack,
-    const.OFPST_VENDOR : experimenter_stats_reply.unpack,
-}
-
-stats_request_parsers = {
-    const.OFPST_DESC : desc_stats_request.unpack,
-    const.OFPST_FLOW : flow_stats_request.unpack,
-    const.OFPST_AGGREGATE : aggregate_stats_request.unpack,
-    const.OFPST_TABLE : table_stats_request.unpack,
-    const.OFPST_PORT : port_stats_request.unpack,
-    const.OFPST_QUEUE : queue_stats_request.unpack,
-    const.OFPST_VENDOR : experimenter_stats_request.unpack,
-}
-
-:: experimenter_ofclasses = [x for x in ofclasses if x.type_members[1].value == 'const.OFPT_VENDOR']
-:: sort_key = lambda x: x.type_members[2].value
-:: experimenter_ofclasses.sort(key=sort_key)
-:: grouped = itertools.groupby(experimenter_ofclasses, sort_key)
-experimenter_parsers = {
-:: for (experimenter, v) in grouped:
-    ${experimenter} : {
-:: for ofclass in v:
-        ${ofclass.type_members[3].value}: ${ofclass.pyname}.unpack,
-:: #endfor
-    },
-:: #endfor
-}
diff --git a/py_gen/templates/_pack_packet_out.py b/py_gen/templates/module.py
similarity index 67%
copy from py_gen/templates/_pack_packet_out.py
copy to py_gen/templates/module.py
index ad8b827..dfe23e8 100644
--- a/py_gen/templates/_pack_packet_out.py
+++ b/py_gen/templates/module.py
@@ -25,15 +25,37 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-        packed.append(struct.pack("!B", self.version))
-        packed.append(struct.pack("!B", self.type))
-        packed.append(struct.pack("!H", 0)) # placeholder for length at index 3
-        packed.append(struct.pack("!L", self.xid))
-        packed.append(struct.pack("!L", self.buffer_id))
-        packed.append(struct.pack("!H", self.in_port))
-        packed_actions = "".join([x.pack() for x in self.actions])
-        packed.append(struct.pack("!H", len(packed_actions)))
-        packed.append(packed_actions)
-        packed.append(self.data)
-        length = sum([len(x) for x in packed])
-        packed[2] = struct.pack("!H", length)
+:: from loxi_globals import OFVersions
+:: import py_gen.oftype
+:: include('_copyright.py')
+
+:: include('_autogen.py')
+
+import struct
+import loxi
+import const
+import common
+import action
+:: if version >= OFVersions.VERSION_1_1:
+import instruction
+:: #endif
+:: if version >= OFVersions.VERSION_1_2:
+import oxm
+:: #endif
+:: if version >= OFVersions.VERSION_1_3:
+import action_id
+import instruction_id
+import meter_band
+import bsn_tlv
+:: #endif
+import util
+import loxi.generic_util
+
+:: for ofclass in ofclasses:
+:: include('_ofclass.py', ofclass=ofclass)
+
+:: #endfor
+
+:: if 'extra_template' in locals():
+:: include(extra_template)
+:: #endif
diff --git a/py_gen/templates/toplevel_init.py b/py_gen/templates/toplevel_init.py
index 3151404..e5493a5 100644
--- a/py_gen/templates/toplevel_init.py
+++ b/py_gen/templates/toplevel_init.py
@@ -26,9 +26,15 @@
 :: # under the EPL.
 ::
 :: include('_copyright.py')
-
+:: import loxi_globals
 :: include('_autogen.py')
 
+version_names = {
+:: for v in loxi_globals.OFVersions.all_supported:
+    ${v.wire_version}: "${v.version}",
+:: #endfor
+}
+
 def protocol(ver):
     """
     Import and return the protocol module for the given wire version.
@@ -53,3 +59,26 @@
     Raised when failing to deserialize an invalid OpenFlow message.
     """
     pass
+
+class Unimplemented(Exception):
+    """
+    Raised when an OpenFlow feature is not yet implemented in PyLoxi.
+    """
+    pass
+
+def unimplemented(msg):
+    raise Unimplemented(msg)
+
+class OFObject(object):
+    """
+    Superclass of all OpenFlow classes
+    """
+    def __init__(self, *args):
+        raise NotImplementedError("cannot instantiate abstract class")
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def show(self):
+        import loxi.pp
+        return loxi.pp.pp(self)
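With these additions the top-level package exposes a wire-version-to-name map and an abstract OFObject base alongside the existing protocol() loader. A short usage sketch, assuming the generated loxi package is on PYTHONPATH:

# Sketch only: top-level helpers added in toplevel_init.py.
import loxi

assert loxi.version_names[1] == "1.0"      # wire version -> dotted version string
ofp = loxi.protocol(1)                     # imports and returns the of10 module
assert ofp.OFP_VERSION == 1

try:
    loxi.unimplemented("example feature")
except loxi.Unimplemented:
    pass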
diff --git a/py_gen/templates/util.py b/py_gen/templates/util.py
index fead78f..85181dc 100644
--- a/py_gen/templates/util.py
+++ b/py_gen/templates/util.py
@@ -26,41 +26,25 @@
 :: # under the EPL.
 ::
 :: include('_copyright.py')
-
+:: from loxi_globals import OFVersions
 :: include('_autogen.py')
 
+import struct
 import loxi
 import const
-import struct
-
-def unpack_array(deserializer, element_size, buf):
-    """
-    Deserialize an array of fixed length elements.
-    The deserializer function should take a buffer and return the new object.
-    """
-    if len(buf) % element_size != 0: raise loxi.ProtocolError("invalid array length")
-    n = len(buf) / element_size
-    return [deserializer(buffer(buf, i*element_size, element_size)) for i in range(n)]
-
-def unpack_list(deserializer, length_fmt, buf):
-    """
-    Deserialize a list of variable-length entries.
-    'length_fmt' is a struct format string with exactly one non-padding format
-    character that returns the length of the given element.
-    The deserializer function should take a buffer and return the new object.
-    """
-    entries = []
-    offset = 0
-    length_struct = struct.Struct(length_fmt)
-    n = len(buf)
-    while offset < n:
-        if offset + length_struct.size > len(buf): raise loxi.ProtocolError("entry header overruns list length")
-        length, = length_struct.unpack_from(buf, offset)
-        if length < length_struct.size: raise loxi.ProtocolError("entry length is less than the header length")
-        if offset + length > len(buf): raise loxi.ProtocolError("entry length overruns list length")
-        entries.append(deserializer(buffer(buf, offset, length)))
-        offset += length
-    return entries
+import common
+import action
+:: if version >= OFVersions.VERSION_1_1:
+import instruction
+:: #endif
+:: if version >= OFVersions.VERSION_1_2:
+import oxm
+:: #endif
+:: if version >= OFVersions.VERSION_1_3:
+import action_id
+import instruction_id
+import meter_band
+:: #endif
 
 def pretty_mac(mac):
     return ':'.join(["%02x" % x for x in mac])
@@ -81,6 +65,7 @@
         set_flags.append("%#x" % v)
     return '|'.join(set_flags) or '0'
 
+:: if version in (OFVersions.VERSION_1_0, OFVersions.VERSION_1_1):
 def pretty_wildcards(v):
     if v == const.OFPFW_ALL:
         return 'OFPFW_ALL'
@@ -89,6 +74,7 @@
                   'OFPFW_NW_SRC_MASK', 'OFPFW_NW_DST_MASK', 'OFPFW_DL_VLAN_PCP',
                   'OFPFW_NW_TOS']
     return pretty_flags(v, flag_names)
+:: #endif
 
 def pretty_port(v):
     named_ports = [(k,v2) for (k,v2) in const.__dict__.iteritems() if k.startswith('OFPP_')]
@@ -96,3 +82,100 @@
         if v == v2:
             return k
     return v
+
+def pack_port_no(value):
+:: if version == OFVersions.VERSION_1_0:
+    return struct.pack("!H", value)
+:: else:
+    return struct.pack("!L", value)
+:: #endif
+
+def unpack_port_no(reader):
+:: if version == OFVersions.VERSION_1_0:
+    return reader.read("!H")[0]
+:: else:
+    return reader.read("!L")[0]
+:: #endif
+
+def pack_fm_cmd(value):
+:: if version == OFVersions.VERSION_1_0:
+    return struct.pack("!H", value)
+:: else:
+    return struct.pack("!B", value)
+:: #endif
+
+def unpack_fm_cmd(reader):
+:: if version == OFVersions.VERSION_1_0:
+    return reader.read("!H")[0]
+:: else:
+    return reader.read("!B")[0]
+:: #endif
+
+def init_wc_bmap():
+:: if version <= OFVersions.VERSION_1_1:
+    return const.OFPFW_ALL
+:: else:
+    return 0
+:: #endif
+
+def pack_wc_bmap(value):
+:: if version <= OFVersions.VERSION_1_1:
+    return struct.pack("!L", value)
+:: else:
+    return struct.pack("!Q", value)
+:: #endif
+
+def unpack_wc_bmap(reader):
+:: if version <= OFVersions.VERSION_1_1:
+    return reader.read("!L")[0]
+:: else:
+    return reader.read("!Q")[0]
+:: #endif
+
+def init_match_bmap():
+:: if version <= OFVersions.VERSION_1_1:
+    return const.OFPFW_ALL
+:: else:
+    return 0
+:: #endif
+
+def pack_match_bmap(value):
+:: if version <= OFVersions.VERSION_1_1:
+    return struct.pack("!L", value)
+:: else:
+    return struct.pack("!Q", value)
+:: #endif
+
+def unpack_match_bmap(reader):
+:: if version <= OFVersions.VERSION_1_1:
+    return reader.read("!L")[0]
+:: else:
+    return reader.read("!Q")[0]
+:: #endif
+
+MASK64 = (1 << 64) - 1
+
+def pack_bitmap_128(value):
+    x = 0l
+    for y in value:
+        x |= 1 << y
+    return struct.pack("!QQ", (x >> 64) & MASK64, x & MASK64)
+
+def unpack_bitmap_128(reader):
+    hi, lo = reader.read("!QQ")
+    x = (hi << 64) | lo
+    i = 0
+    value = set()
+    while x != 0:
+        if x & 1 == 1:
+            value.add(i)
+        i += 1
+        x >>= 1
+    return value
+
+def pack_checksum_128(value):
+    return struct.pack("!QQ", (value >> 64) & MASK64, value & MASK64)
+
+def unpack_checksum_128(reader):
+    hi, lo = reader.read("!QQ")
+    return (hi << 64) | lo
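A worked round trip through the 128-bit bitmap helpers added above (sketch only; assumes the generated loxi package is importable and that util.py is generated per version, e.g. as loxi.of10.util):

# Sketch only: bits 0, 3 and 64 packed into two big-endian 64-bit words.
import loxi.of10.util as util
from loxi.generic_util import OFReader

buf = util.pack_bitmap_128(set([0, 3, 64]))
assert len(buf) == 16
assert util.unpack_bitmap_128(OFReader(buf)) == set([0, 3, 64])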
diff --git a/py_gen/tests/generic_util.py b/py_gen/tests/generic_util.py
new file mode 100644
index 0000000..8b2f59f
--- /dev/null
+++ b/py_gen/tests/generic_util.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+import unittest
+
+try:
+    import loxi
+    import loxi.generic_util
+    from loxi.generic_util import OFReader
+except ImportError:
+    exit("loxi package not found. Try setting PYTHONPATH.")
+
+class TestUnpackList(unittest.TestCase):
+    def test_simple(self):
+        def deserializer(reader):
+            length, = reader.peek("!B")
+            return reader.read('!%ds' % length)[0]
+        reader = loxi.generic_util.OFReader("\x04abc\x03de\x02f\x01")
+        a = loxi.generic_util.unpack_list(reader, deserializer)
+        self.assertEquals(['\x04abc', '\x03de', '\x02f', '\x01'], a)
+
+class TestOFReader(unittest.TestCase):
+    def test_empty(self):
+        reader = OFReader("")
+        self.assertEquals(reader.read_all(), "")
+        with self.assertRaisesRegexp(loxi.ProtocolError, "Buffer too short"):
+            reader.read('!B')
+
+    def test_simple(self):
+        reader = OFReader("abcdefg")
+        self.assertEquals(reader.read('2s')[0], "ab")
+        self.assertEquals(reader.read('2s')[0], "cd")
+        self.assertEquals(reader.read('3s')[0], "efg")
+        with self.assertRaisesRegexp(loxi.ProtocolError, "Buffer too short"):
+            reader.read('s')
+
+    def test_skip(self):
+        reader = OFReader("abcdefg")
+        reader.skip(4)
+        self.assertEquals(reader.read('s')[0], "e")
+        with self.assertRaisesRegexp(loxi.ProtocolError, "Buffer too short"):
+            reader.skip(3)
+
+    def test_is_empty(self):
+        reader = OFReader("abcdefg")
+        self.assertEquals(reader.is_empty(), False)
+        reader.skip(6)
+        self.assertEquals(reader.is_empty(), False)
+        reader.skip(1)
+        self.assertEquals(reader.is_empty(), True)
+        with self.assertRaisesRegexp(loxi.ProtocolError, "Buffer too short"):
+            reader.skip(1)
+
+    def test_exception_effect(self):
+        reader = OFReader("abcdefg")
+        with self.assertRaisesRegexp(loxi.ProtocolError, "Buffer too short"):
+            reader.skip(8)
+        self.assertEquals(reader.is_empty(), False)
+        reader.skip(7)
+        self.assertEquals(reader.is_empty(), True)
+
+    def test_peek(self):
+        reader = OFReader("abcdefg")
+        self.assertEquals(reader.peek('2s')[0], "ab")
+        self.assertEquals(reader.peek('2s')[0], "ab")
+        self.assertEquals(reader.read('2s')[0], "ab")
+        self.assertEquals(reader.peek('2s')[0], "cd")
+        reader.skip(2)
+        self.assertEquals(reader.read('3s')[0], "efg")
+        with self.assertRaisesRegexp(loxi.ProtocolError, "Buffer too short"):
+            reader.peek('s')
+
+    def test_read_all(self):
+        reader = OFReader("abcdefg")
+        reader.skip(2)
+        self.assertEquals(reader.read_all(), "cdefg")
+        self.assertEquals(reader.read_all(), "")
+
+    def test_slice(self):
+        reader = OFReader("abcdefg")
+        reader.skip(2)
+        self.assertEquals(reader.slice(3).read_all(), "cde")
+        self.assertEquals(reader.slice(2).read_all(), "fg")
+        self.assertEquals(reader.is_empty(), True)
+
+    def test_skip_align(self):
+        reader = OFReader("abcd" + "efgh" + "ijkl" + "mnop" + "qr")
+        reader.skip_align()
+        self.assertEquals(reader.peek('2s')[0], 'ab')
+        self.assertEquals(reader.read('2s')[0], "ab")
+        reader.skip_align()
+        self.assertEquals(reader.peek('2s')[0], 'ij')
+        self.assertEquals(reader.read('2s')[0], 'ij')
+        child = reader.slice(8)
+        self.assertEquals(child.peek('2s')[0], 'kl')
+        child.skip_align()
+        self.assertEquals(child.peek('2s')[0], 'qr')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/py_gen/tests/of10.py b/py_gen/tests/of10.py
index 86f9b2b..7e10335 100644
--- a/py_gen/tests/of10.py
+++ b/py_gen/tests/of10.py
@@ -26,9 +26,13 @@
 # EPL for the specific language governing permissions and limitations
 # under the EPL.
 import unittest
+import test_data
+from testutil import add_datafiles_tests
 
 try:
+    import loxi
     import loxi.of10 as ofp
+    from loxi.generic_util import OFReader
 except ImportError:
     exit("loxi package not found. Try setting PYTHONPATH.")
 
@@ -36,6 +40,7 @@
     def test_toplevel(self):
         import loxi
         self.assertTrue(hasattr(loxi, "ProtocolError"))
+        self.assertEquals(loxi.version_names[1], "1.0")
         ofp = loxi.protocol(1)
         self.assertEquals(ofp.OFP_VERSION, 1)
         self.assertTrue(hasattr(ofp, "action"))
@@ -54,23 +59,6 @@
         self.assertTrue(hasattr(loxi.of10, "message"))
 
 class TestActions(unittest.TestCase):
-    def test_output_pack(self):
-        expected = "\x00\x00\x00\x08\xff\xf8\xff\xff"
-        action = ofp.action.output(port=ofp.OFPP_IN_PORT, max_len=0xffff)
-        self.assertEquals(expected, action.pack())
-
-    def test_output_unpack(self):
-        # Normal case
-        buf = "\x00\x00\x00\x08\xff\xf8\xff\xff"
-        action = ofp.action.output.unpack(buf)
-        self.assertEqual(action.port, ofp.OFPP_IN_PORT)
-        self.assertEqual(action.max_len, 0xffff)
-
-        # Invalid length
-        buf = "\x00\x00\x00\x09\xff\xf8\xff\xff\x00"
-        with self.assertRaises(ofp.ProtocolError):
-            ofp.action.output.unpack(buf)
-
     def test_output_equality(self):
         action = ofp.action.output(port=1, max_len=0x1234)
         action2 = ofp.action.output(port=1, max_len=0x1234)
@@ -84,32 +72,6 @@
         self.assertNotEquals(action, action2)
         action2.max_len = 0x1234
 
-    def test_output_show(self):
-        action = ofp.action.output(port=1, max_len=0x1234)
-        expected = "output { port = 1, max_len = 0x1234 }"
-        self.assertEquals(expected, action.show())
-
-    def test_bsn_set_tunnel_dst_pack(self):
-        expected = ''.join([
-            "\xff\xff", "\x00\x10", # type/length
-            "\x00\x5c\x16\xc7", # experimenter
-            "\x00\x00\x00\x02", # subtype
-            "\x12\x34\x56\x78" # dst
-        ])
-        action = ofp.action.bsn_set_tunnel_dst(dst=0x12345678)
-        self.assertEquals(expected, action.pack())
-
-    def test_bsn_set_tunnel_dst_unpack(self):
-        buf = ''.join([
-            "\xff\xff", "\x00\x10", # type/length
-            "\x00\x5c\x16\xc7", # experimenter
-            "\x00\x00\x00\x02", # subtype
-            "\x12\x34\x56\x78" # dst
-        ])
-        action = ofp.action.bsn_set_tunnel_dst.unpack(buf)
-        self.assertEqual(action.subtype, 2)
-        self.assertEqual(action.dst, 0x12345678)
-
 # Assumes action serialization/deserialization works
 class TestActionList(unittest.TestCase):
     def test_normal(self):
@@ -126,34 +88,34 @@
         add(ofp.action.bsn_set_tunnel_dst(dst=0x12345678))
         add(ofp.action.nicira_dec_ttl())
 
-        actions = ofp.action.unpack_list(''.join(bufs))
+        actions = loxi.generic_util.unpack_list(OFReader(''.join(bufs)), ofp.action.action.unpack)
         self.assertEquals(actions, expected)
 
     def test_empty_list(self):
-        self.assertEquals(ofp.action.unpack_list(''), [])
+        self.assertEquals(loxi.generic_util.unpack_list(OFReader(''), ofp.action.action.unpack), [])
 
     def test_invalid_list_length(self):
         buf = '\x00' * 9
-        with self.assertRaisesRegexp(ofp.ProtocolError, 'not a multiple of 8'):
-            ofp.action.unpack_list(buf)
+        with self.assertRaisesRegexp(ofp.ProtocolError, 'Buffer too short'):
+            loxi.generic_util.unpack_list(OFReader(buf), ofp.action.action.unpack)
 
     def test_invalid_action_length(self):
         buf = '\x00' * 8
-        with self.assertRaisesRegexp(ofp.ProtocolError, 'is less than the header length'):
-            ofp.action.unpack_list(buf)
+        with self.assertRaisesRegexp(ofp.ProtocolError, 'Buffer too short'):
+            loxi.generic_util.unpack_list(OFReader(buf), ofp.action.action.unpack)
 
         buf = '\x00\x00\x00\x04'
-        with self.assertRaisesRegexp(ofp.ProtocolError, 'not a multiple of 8'):
-            ofp.action.unpack_list(buf)
+        with self.assertRaisesRegexp(ofp.ProtocolError, 'Buffer too short'):
+            loxi.generic_util.unpack_list(OFReader(buf), ofp.action.action.unpack)
 
         buf = '\x00\x00\x00\x10\x00\x00\x00\x00'
-        with self.assertRaisesRegexp(ofp.ProtocolError, 'overrun'):
-            ofp.action.unpack_list(buf)
+        with self.assertRaisesRegexp(ofp.ProtocolError, 'Buffer too short'):
+            loxi.generic_util.unpack_list(OFReader(buf), ofp.action.action.unpack)
 
-    def test_invalid_action_type(self):
+    def test_unknown_action_type(self):
         buf = '\xff\xfe\x00\x08\x00\x00\x00\x00'
-        with self.assertRaisesRegexp(ofp.ProtocolError, 'unknown action type'):
-            ofp.action.unpack_list(buf)
+        result = loxi.generic_util.unpack_list(OFReader(buf), ofp.action.action.unpack)
+        self.assertEquals(result, [ofp.action.action(type=0xfffe)])
 
 class TestConstants(unittest.TestCase):
     def test_ports(self):
@@ -163,159 +125,12 @@
         self.assertEquals(0xfc000, ofp.OFPFW_NW_DST_MASK)
 
 class TestCommon(unittest.TestCase):
-    def test_port_desc_pack(self):
-        obj = ofp.port_desc(port_no=ofp.OFPP_CONTROLLER,
-                            hw_addr=[1,2,3,4,5,6],
-                            name="foo",
-                            config=ofp.OFPPC_NO_FLOOD,
-                            state=ofp.OFPPS_STP_FORWARD,
-                            curr=ofp.OFPPF_10MB_HD,
-                            advertised=ofp.OFPPF_1GB_FD,
-                            supported=ofp.OFPPF_AUTONEG,
-                            peer=ofp.OFPPF_PAUSE_ASYM)
-        expected = ''.join([
-            '\xff\xfd', # port_no
-            '\x01\x02\x03\x04\x05\x06', # hw_addr
-            'foo'.ljust(16, '\x00'), # name
-            '\x00\x00\x00\x10', # config
-            '\x00\x00\x02\x00', # state
-            '\x00\x00\x00\x01', # curr
-            '\x00\x00\x00\x20', # advertised
-            '\x00\x00\x02\x00', # supported
-            '\x00\x00\x08\x00', # peer
-        ])
-        self.assertEquals(expected, obj.pack())
-
-    def test_port_desc_unpack(self):
-        buf = ''.join([
-            '\xff\xfd', # port_no
-            '\x01\x02\x03\x04\x05\x06', # hw_addr
-            'foo'.ljust(16, '\x00'), # name
-            '\x00\x00\x00\x10', # config
-            '\x00\x00\x02\x00', # state
-            '\x00\x00\x00\x01', # curr
-            '\x00\x00\x00\x20', # advertised
-            '\x00\x00\x02\x00', # supported
-            '\x00\x00\x08\x00', # peer
-        ])
-        obj = ofp.port_desc.unpack(buf)
-        self.assertEquals(ofp.OFPP_CONTROLLER, obj.port_no)
-        self.assertEquals('foo', obj.name)
-        self.assertEquals(ofp.OFPPF_PAUSE_ASYM, obj.peer)
-
-    def test_table_stats_entry_pack(self):
-        obj = ofp.table_stats_entry(table_id=3,
-                                    name="foo",
-                                    wildcards=ofp.OFPFW_ALL,
-                                    max_entries=5,
-                                    active_count=2,
-                                    lookup_count=1099511627775,
-                                    matched_count=9300233470495232273L)
-        expected = ''.join([
-            '\x03', # table_id
-            '\x00\x00\x00', # pad
-            'foo'.ljust(32, '\x00'), # name
-            '\x00\x3f\xFF\xFF', # wildcards
-            '\x00\x00\x00\x05', # max_entries
-            '\x00\x00\x00\x02', # active_count
-            '\x00\x00\x00\xff\xff\xff\xff\xff', # lookup_count
-            '\x81\x11\x11\x11\x11\x11\x11\x11', # matched_count
-        ])
-        self.assertEquals(expected, obj.pack())
-
-    def test_table_stats_entry_unpack(self):
-        buf = ''.join([
-            '\x03', # table_id
-            '\x00\x00\x00', # pad
-            'foo'.ljust(32, '\x00'), # name
-            '\x00\x3f\xFF\xFF', # wildcards
-            '\x00\x00\x00\x05', # max_entries
-            '\x00\x00\x00\x02', # active_count
-            '\x00\x00\x00\xff\xff\xff\xff\xff', # lookup_count
-            '\x81\x11\x11\x11\x11\x11\x11\x11', # matched_count
-        ])
-        obj = ofp.table_stats_entry.unpack(buf)
-        self.assertEquals(3, obj.table_id)
-        self.assertEquals('foo', obj.name)
-        self.assertEquals(9300233470495232273L, obj.matched_count)
-
-    def test_flow_stats_entry_pack(self):
-        obj = ofp.flow_stats_entry(table_id=3,
-                                   match=ofp.match(),
-                                   duration_sec=1,
-                                   duration_nsec=2,
-                                   priority=100,
-                                   idle_timeout=5,
-                                   hard_timeout=10,
-                                   cookie=0x0123456789abcdef,
-                                   packet_count=10,
-                                   byte_count=1000,
-                                   actions=[ofp.action.output(port=1),
-                                            ofp.action.output(port=2)])
-        expected = ''.join([
-            '\x00\x68', # length
-            '\x03', # table_id
-            '\x00', # pad
-            '\x00\x3f\xff\xff', # match.wildcards
-            '\x00' * 36, # remaining match fields
-            '\x00\x00\x00\x01', # duration_sec
-            '\x00\x00\x00\x02', # duration_nsec
-            '\x00\x64', # priority
-            '\x00\x05', # idle_timeout
-            '\x00\x0a', # hard_timeout
-            '\x00' * 6, # pad2
-            '\x01\x23\x45\x67\x89\xab\xcd\xef', # cookie
-            '\x00\x00\x00\x00\x00\x00\x00\x0a', # packet_count
-            '\x00\x00\x00\x00\x00\x00\x03\xe8', # byte_count
-            '\x00\x00', # actions[0].type
-            '\x00\x08', # actions[0].len
-            '\x00\x01', # actions[0].port
-            '\x00\x00', # actions[0].max_len
-            '\x00\x00', # actions[1].type
-            '\x00\x08', # actions[1].len
-            '\x00\x02', # actions[1].port
-            '\x00\x00', # actions[1].max_len
-        ])
-        self.assertEquals(expected, obj.pack())
-
-    def test_flow_stats_entry_unpack(self):
-        buf = ''.join([
-            '\x00\x68', # length
-            '\x03', # table_id
-            '\x00', # pad
-            '\x00\x3f\xff\xff', # match.wildcards
-            '\x00' * 36, # remaining match fields
-            '\x00\x00\x00\x01', # duration_sec
-            '\x00\x00\x00\x02', # duration_nsec
-            '\x00\x64', # priority
-            '\x00\x05', # idle_timeout
-            '\x00\x0a', # hard_timeout
-            '\x00' * 6, # pad2
-            '\x01\x23\x45\x67\x89\xab\xcd\xef', # cookie
-            '\x00\x00\x00\x00\x00\x00\x00\x0a', # packet_count
-            '\x00\x00\x00\x00\x00\x00\x03\xe8', # byte_count
-            '\x00\x00', # actions[0].type
-            '\x00\x08', # actions[0].len
-            '\x00\x01', # actions[0].port
-            '\x00\x00', # actions[0].max_len
-            '\x00\x00', # actions[1].type
-            '\x00\x08', # actions[1].len
-            '\x00\x02', # actions[1].port
-            '\x00\x00', # actions[1].max_len
-        ])
-        obj = ofp.flow_stats_entry.unpack(buf)
-        self.assertEquals(3, obj.table_id)
-        self.assertEquals(ofp.OFPFW_ALL, obj.match.wildcards)
-        self.assertEquals(2, len(obj.actions))
-        self.assertEquals(1, obj.actions[0].port)
-        self.assertEquals(2, obj.actions[1].port)
-
     def test_match(self):
         match = ofp.match()
         self.assertEquals(match.wildcards, ofp.OFPFW_ALL)
         self.assertEquals(match.tcp_src, 0)
         buf = match.pack()
-        match2 = ofp.match.unpack(buf)
+        match2 = ofp.match.unpack(OFReader(buf))
         self.assertEquals(match, match2)
 
 class TestMessages(unittest.TestCase):
@@ -332,44 +147,18 @@
         msg = ofp.message.hello(xid=0)
         self.assertEquals(msg.xid, 0)
 
-    def test_hello_unpack(self):
-        # Normal case
-        buf = "\x01\x00\x00\x08\x12\x34\x56\x78"
-        msg = ofp.message.hello(xid=0x12345678)
-        self.assertEquals(buf, msg.pack())
-
-        # Invalid length
-        buf = "\x01\x00\x00\x09\x12\x34\x56\x78\x9a"
-        with self.assertRaisesRegexp(ofp.ProtocolError, "should be 8"):
-            ofp.message.hello.unpack(buf)
-
     def test_echo_request_construction(self):
         msg = ofp.message.echo_request(data="abc")
         self.assertEquals(msg.data, "abc")
 
-    def test_echo_request_pack(self):
-        msg = ofp.message.echo_request(xid=0x12345678, data="abc")
-        buf = msg.pack()
-        self.assertEquals(buf, "\x01\x02\x00\x0b\x12\x34\x56\x78\x61\x62\x63")
-
-        msg2 = ofp.message.echo_request.unpack(buf)
-        self.assertEquals(msg, msg2)
-
-    def test_echo_request_unpack(self):
-        # Normal case
-        buf = "\x01\x02\x00\x0b\x12\x34\x56\x78\x61\x62\x63"
-        msg = ofp.message.echo_request(xid=0x12345678, data="abc")
-        self.assertEquals(buf, msg.pack())
-
-        # Invalid length
+    def test_echo_request_invalid_length(self):
         buf = "\x01\x02\x00\x07\x12\x34\x56"
-        with self.assertRaisesRegexp(ofp.ProtocolError, "buffer too short"):
-            ofp.message.echo_request.unpack(buf)
+        with self.assertRaisesRegexp(ofp.ProtocolError, "Buffer too short"):
+            ofp.message.echo_request.unpack(OFReader(buf))
 
     def test_echo_request_equality(self):
         msg = ofp.message.echo_request(xid=0x12345678, data="abc")
         msg2 = ofp.message.echo_request(xid=0x12345678, data="abc")
-        #msg2 = ofp.message.echo_request.unpack(msg.pack())
         self.assertEquals(msg, msg2)
 
         msg2.xid = 1
@@ -380,471 +169,11 @@
         self.assertNotEquals(msg, msg2)
         msg2.data = msg.data
 
-    def test_echo_request_show(self):
-        expected = "echo_request { xid = 0x12345678, data = 'ab\\x01' }"
-        msg = ofp.message.echo_request(xid=0x12345678, data="ab\x01")
-        self.assertEquals(msg.show(), expected)
-
-    def test_flow_add(self):
-        match = ofp.match()
-        msg = ofp.message.flow_add(xid=1,
-                                   match=match,
-                                   cookie=1,
-                                   idle_timeout=5,
-                                   flags=ofp.OFPFF_CHECK_OVERLAP,
-                                   actions=[
-                                       ofp.action.output(port=1),
-                                       ofp.action.output(port=2),
-                                       ofp.action.output(port=ofp.OFPP_CONTROLLER,
-                                                         max_len=1024)])
-        buf = msg.pack()
-        msg2 = ofp.message.flow_add.unpack(buf)
-        self.assertEquals(msg, msg2)
-
-    def test_port_mod_pack(self):
-        msg = ofp.message.port_mod(xid=2,
-                                   port_no=ofp.OFPP_CONTROLLER,
-                                   hw_addr=[1,2,3,4,5,6],
-                                   config=0x90ABCDEF,
-                                   mask=0xFF11FF11,
-                                   advertise=0xCAFE6789)
-        expected = "\x01\x0f\x00\x20\x00\x00\x00\x02\xff\xfd\x01\x02\x03\x04\x05\x06\x90\xab\xcd\xef\xff\x11\xff\x11\xca\xfe\x67\x89\x00\x00\x00\x00"
-        self.assertEquals(expected, msg.pack())
-
-    def test_desc_stats_reply_pack(self):
-        msg = ofp.message.desc_stats_reply(xid=3,
-                                           flags=ofp.OFPSF_REPLY_MORE,
-                                           mfr_desc="The Indigo-2 Community",
-                                           hw_desc="Unknown server",
-                                           sw_desc="Indigo-2 LRI pre-release",
-                                           serial_num="11235813213455",
-                                           dp_desc="Indigo-2 LRI forwarding module")
-        expected = ''.join([
-            '\x01', '\x11', # version/type
-            '\x04\x2c', # length
-            '\x00\x00\x00\x03', # xid
-            '\x00\x00', # stats_type
-            '\x00\x01', # flags
-            'The Indigo-2 Community'.ljust(256, '\x00'), # mfr_desc
-            'Unknown server'.ljust(256, '\x00'), # hw_desc
-            'Indigo-2 LRI pre-release'.ljust(256, '\x00'), # sw_desc
-            '11235813213455'.ljust(32, '\x00'), # serial_num
-            'Indigo-2 LRI forwarding module'.ljust(256, '\x00'), # dp_desc
-        ])
-        self.assertEquals(expected, msg.pack())
-
-    def test_desc_stats_reply_unpack(self):
-        buf = ''.join([
-            '\x01', '\x11', # version/type
-            '\x04\x2c', # length
-            '\x00\x00\x00\x03', # xid
-            '\x00\x00', # stats_type
-            '\x00\x01', # flags
-            'The Indigo-2 Community'.ljust(256, '\x00'), # mfr_desc
-            'Unknown server'.ljust(256, '\x00'), # hw_desc
-            'Indigo-2 LRI pre-release'.ljust(256, '\x00'), # sw_desc
-            '11235813213455'.ljust(32, '\x00'), # serial_num
-            'Indigo-2 LRI forwarding module'.ljust(256, '\x00'), # dp_desc
-        ])
-        msg = ofp.message.desc_stats_reply.unpack(buf)
-        self.assertEquals('Indigo-2 LRI forwarding module', msg.dp_desc)
-        self.assertEquals('11235813213455', msg.serial_num)
-        self.assertEquals(ofp.OFPST_DESC, msg.stats_type)
-        self.assertEquals(ofp.OFPSF_REPLY_MORE, msg.flags)
-
-    def test_port_status_pack(self):
-        desc = ofp.port_desc(port_no=ofp.OFPP_CONTROLLER,
-                             hw_addr=[1,2,3,4,5,6],
-                             name="foo",
-                             config=ofp.OFPPC_NO_FLOOD,
-                             state=ofp.OFPPS_STP_FORWARD,
-                             curr=ofp.OFPPF_10MB_HD,
-                             advertised=ofp.OFPPF_1GB_FD,
-                             supported=ofp.OFPPF_AUTONEG,
-                             peer=ofp.OFPPF_PAUSE_ASYM)
-
-        msg = ofp.message.port_status(xid=4,
-                                      reason=ofp.OFPPR_DELETE,
-                                      desc=desc)
-        expected = ''.join([
-            '\x01', '\x0c', # version/type
-            '\x00\x40', # length
-            '\x00\x00\x00\x04', # xid
-            '\x01', # reason
-            '\x00\x00\x00\x00\x00\x00\x00' # pad
-            '\xff\xfd', # desc.port_no
-            '\x01\x02\x03\x04\x05\x06', # desc.hw_addr
-            'foo'.ljust(16, '\x00'), # desc.name
-            '\x00\x00\x00\x10', # desc.config
-            '\x00\x00\x02\x00', # desc.state
-            '\x00\x00\x00\x01', # desc.curr
-            '\x00\x00\x00\x20', # desc.advertised
-            '\x00\x00\x02\x00', # desc.supported
-            '\x00\x00\x08\x00', # desc.peer
-        ])
-        self.assertEquals(expected, msg.pack())
-
-    def test_port_status_unpack(self):
-        buf = ''.join([
-            '\x01', '\x0c', # version/type
-            '\x00\x40', # length
-            '\x00\x00\x00\x04', # xid
-            '\x01', # reason
-            '\x00\x00\x00\x00\x00\x00\x00' # pad
-            '\xff\xfd', # desc.port_no
-            '\x01\x02\x03\x04\x05\x06', # desc.hw_addr
-            'foo'.ljust(16, '\x00'), # desc.name
-            '\x00\x00\x00\x10', # desc.config
-            '\x00\x00\x02\x00', # desc.state
-            '\x00\x00\x00\x01', # desc.curr
-            '\x00\x00\x00\x20', # desc.advertised
-            '\x00\x00\x02\x00', # desc.supported
-            '\x00\x00\x08\x00', # desc.peer
-        ])
-        msg = ofp.message.port_status.unpack(buf)
-        self.assertEquals('foo', msg.desc.name)
-        self.assertEquals(ofp.OFPPF_PAUSE_ASYM, msg.desc.peer)
-
-    def test_port_stats_reply_pack(self):
-        msg = ofp.message.port_stats_reply(xid=5, flags=0, entries=[
-            ofp.port_stats_entry(port_no=1, rx_packets=56, collisions=5),
-            ofp.port_stats_entry(port_no=ofp.OFPP_LOCAL, rx_packets=1, collisions=1)])
-        expected = ''.join([
-            '\x01', '\x11', # version/type
-            '\x00\xdc', # length
-            '\x00\x00\x00\x05', # xid
-            '\x00\x04', # stats_type
-            '\x00\x00', # flags
-            '\x00\x01', # entries[0].port_no
-            '\x00\x00\x00\x00\x00\x00' # entries[0].pad
-            '\x00\x00\x00\x00\x00\x00\x00\x38', # entries[0].rx_packets
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].tx_packets
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_bytes
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].tx_bytes
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_dropped
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].tx_dropped
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_errors
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].tx_errors
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_frame_err
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_over_err
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_crc_err
-            '\x00\x00\x00\x00\x00\x00\x00\x05', # entries[0].collisions
-            '\xff\xfe', # entries[1].port_no
-            '\x00\x00\x00\x00\x00\x00' # entries[1].pad
-            '\x00\x00\x00\x00\x00\x00\x00\x01', # entries[1].rx_packets
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].tx_packets
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_bytes
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].tx_bytes
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_dropped
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].tx_dropped
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_errors
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].tx_errors
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_frame_err
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_over_err
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_crc_err
-            '\x00\x00\x00\x00\x00\x00\x00\x01', # entries[1].collisions
-        ])
-        self.assertEquals(expected, msg.pack())
-
-    def test_port_stats_reply_unpack(self):
-        buf = ''.join([
-            '\x01', '\x11', # version/type
-            '\x00\xdc', # length
-            '\x00\x00\x00\x05', # xid
-            '\x00\x04', # stats_type
-            '\x00\x00', # flags
-            '\x00\x01', # entries[0].port_no
-            '\x00\x00\x00\x00\x00\x00' # entries[0].pad
-            '\x00\x00\x00\x00\x00\x00\x00\x38', # entries[0].rx_packets
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].tx_packets
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_bytes
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].tx_bytes
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_dropped
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].tx_dropped
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_errors
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].tx_errors
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_frame_err
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_over_err
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[0].rx_crc_err
-            '\x00\x00\x00\x00\x00\x00\x00\x05', # entries[0].collisions
-            '\xff\xfe', # entries[1].port_no
-            '\x00\x00\x00\x00\x00\x00' # entries[1].pad
-            '\x00\x00\x00\x00\x00\x00\x00\x01', # entries[1].rx_packets
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].tx_packets
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_bytes
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].tx_bytes
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_dropped
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].tx_dropped
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_errors
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].tx_errors
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_frame_err
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_over_err
-            '\x00\x00\x00\x00\x00\x00\x00\x00', # entries[1].rx_crc_err
-            '\x00\x00\x00\x00\x00\x00\x00\x01', # entries[1].collisions
-        ])
-        msg = ofp.message.port_stats_reply.unpack(buf)
-        self.assertEquals(ofp.OFPST_PORT, msg.stats_type)
-        self.assertEquals(2, len(msg.entries))
-
-    sample_flow_stats_reply_buf = ''.join([
-        '\x01', '\x11', # version/type
-        '\x00\xe4', # length
-        '\x00\x00\x00\x06', # xid
-        '\x00\x01', # stats_type
-        '\x00\x00', # flags
-        '\x00\x68', # entries[0].length
-        '\x03', # entries[0].table_id
-        '\x00', # entries[0].pad
-        '\x00\x3f\xff\xff', # entries[0].match.wildcards
-        '\x00' * 36, # remaining match fields
-        '\x00\x00\x00\x01', # entries[0].duration_sec
-        '\x00\x00\x00\x02', # entries[0].duration_nsec
-        '\x00\x64', # entries[0].priority
-        '\x00\x05', # entries[0].idle_timeout
-        '\x00\x0a', # entries[0].hard_timeout
-        '\x00' * 6, # entries[0].pad2
-        '\x01\x23\x45\x67\x89\xab\xcd\xef', # entries[0].cookie
-        '\x00\x00\x00\x00\x00\x00\x00\x0a', # entries[0].packet_count
-        '\x00\x00\x00\x00\x00\x00\x03\xe8', # entries[0].byte_count
-        '\x00\x00', # entries[0].actions[0].type
-        '\x00\x08', # entries[0].actions[0].len
-        '\x00\x01', # entries[0].actions[0].port
-        '\x00\x00', # entries[0].actions[0].max_len
-        '\x00\x00', # entries[0].actions[1].type
-        '\x00\x08', # entries[0].actions[1].len
-        '\x00\x02', # entries[0].actions[1].port
-        '\x00\x00', # entries[0].actions[1].max_len
-        '\x00\x70', # entries[1].length
-        '\x04', # entries[1].table_id
-        '\x00', # entries[1].pad
-        '\x00\x3f\xff\xff', # entries[1].match.wildcards
-        '\x00' * 36, # remaining match fields
-        '\x00\x00\x00\x01', # entries[1].duration_sec
-        '\x00\x00\x00\x02', # entries[1].duration_nsec
-        '\x00\x64', # entries[1].priority
-        '\x00\x05', # entries[1].idle_timeout
-        '\x00\x0a', # entries[1].hard_timeout
-        '\x00' * 6, # entries[1].pad2
-        '\x01\x23\x45\x67\x89\xab\xcd\xef', # entries[1].cookie
-        '\x00\x00\x00\x00\x00\x00\x00\x0a', # entries[1].packet_count
-        '\x00\x00\x00\x00\x00\x00\x03\xe8', # entries[1].byte_count
-        '\x00\x00', # entries[1].actions[0].type
-        '\x00\x08', # entries[1].actions[0].len
-        '\x00\x01', # entries[1].actions[0].port
-        '\x00\x00', # entries[1].actions[0].max_len
-        '\x00\x00', # entries[1].actions[1].type
-        '\x00\x08', # entries[1].actions[1].len
-        '\x00\x02', # entries[1].actions[1].port
-        '\x00\x00', # entries[1].actions[1].max_len
-        '\x00\x00', # entries[1].actions[2].type
-        '\x00\x08', # entries[1].actions[2].len
-        '\x00\x03', # entries[1].actions[2].port
-        '\x00\x00', # entries[1].actions[2].max_len
-    ])
-
-    def test_flow_stats_reply_pack(self):
-        msg = ofp.message.flow_stats_reply(xid=6, flags=0, entries=[
-            ofp.flow_stats_entry(table_id=3,
-                                 match=ofp.match(),
-                                 duration_sec=1,
-                                 duration_nsec=2,
-                                 priority=100,
-                                 idle_timeout=5,
-                                 hard_timeout=10,
-                                 cookie=0x0123456789abcdef,
-                                 packet_count=10,
-                                 byte_count=1000,
-                                 actions=[ofp.action.output(port=1),
-                                          ofp.action.output(port=2)]),
-            ofp.flow_stats_entry(table_id=4,
-                                 match=ofp.match(),
-                                 duration_sec=1,
-                                 duration_nsec=2,
-                                 priority=100,
-                                 idle_timeout=5,
-                                 hard_timeout=10,
-                                 cookie=0x0123456789abcdef,
-                                 packet_count=10,
-                                 byte_count=1000,
-                                 actions=[ofp.action.output(port=1),
-                                          ofp.action.output(port=2),
-                                          ofp.action.output(port=3)])])
-        self.assertEquals(self.sample_flow_stats_reply_buf, msg.pack())
-
-    def test_flow_stats_reply_unpack(self):
-        msg = ofp.message.flow_stats_reply.unpack(self.sample_flow_stats_reply_buf)
-        self.assertEquals(ofp.OFPST_FLOW, msg.stats_type)
-        self.assertEquals(2, len(msg.entries))
-        self.assertEquals(2, len(msg.entries[0].actions))
-        self.assertEquals(3, len(msg.entries[1].actions))
-
-    def test_flow_add_show(self):
-        expected = """\
-flow_add {
-  xid = None,
-  match = match_v1 {
-    wildcards = OFPFW_DL_SRC|OFPFW_DL_DST,
-    in_port = 3,
-    eth_src = 01:23:45:67:89:ab,
-    eth_dst = cd:ef:01:23:45:67,
-    vlan_vid = 0x0,
-    vlan_pcp = 0x0,
-    pad = 0x0,
-    eth_type = 0x0,
-    ip_dscp = 0x0,
-    ip_proto = 0x0,
-    pad1 = [ 0, 0 ],
-    ipv4_src = 192.168.3.127,
-    ipv4_dst = 255.255.255.255,
-    tcp_src = 0x0,
-    tcp_dst = 0x0
-  },
-  cookie = 0x0,
-  idle_timeout = 0x0,
-  hard_timeout = 0x0,
-  priority = 0x0,
-  buffer_id = 0x0,
-  out_port = 0,
-  flags = 0x0,
-  actions = [
-    output { port = OFPP_FLOOD, max_len = 0x0 },
-    nicira_dec_ttl { pad = 0x0, pad1 = 0x0 },
-    bsn_set_tunnel_dst { dst = 0x0 }
-  ]
-}"""
-        msg = ofp.message.flow_add(
-            match=ofp.match(
-                wildcards=ofp.OFPFW_DL_SRC|ofp.OFPFW_DL_DST,
-                in_port=3,
-                ipv4_src=0xc0a8037f,
-                ipv4_dst=0xffffffff,
-                eth_src=[0x01, 0x23, 0x45, 0x67, 0x89, 0xab],
-                eth_dst=[0xcd, 0xef, 0x01, 0x23, 0x45, 0x67]),
-            actions=[
-                ofp.action.output(port=ofp.OFPP_FLOOD),
-                ofp.action.nicira_dec_ttl(),
-                ofp.action.bsn_set_tunnel_dst()])
-        self.assertEquals(msg.show(), expected)
-
-    sample_packet_out_buf = ''.join([
-        '\x01', '\x0d', # version/type
-        '\x00\x23', # length
-        '\x12\x34\x56\x78', # xid
-        '\xab\xcd\xef\x01', # buffer_id
-        '\xff\xfe', # in_port
-        '\x00\x10', # actions_len
-        '\x00\x00', # actions[0].type
-        '\x00\x08', # actions[0].len
-        '\x00\x01', # actions[0].port
-        '\x00\x00', # actions[0].max_len
-        '\x00\x00', # actions[1].type
-        '\x00\x08', # actions[1].len
-        '\x00\x02', # actions[1].port
-        '\x00\x00', # actions[1].max_len
-        'abc' # data
-    ])
-
-    def test_packet_out_pack(self):
-        msg = ofp.message.packet_out(
-            xid=0x12345678,
-            buffer_id=0xabcdef01,
-            in_port=ofp.OFPP_LOCAL,
-            actions=[
-                ofp.action.output(port=1),
-                ofp.action.output(port=2)],
-            data='abc')
-        self.assertEquals(self.sample_packet_out_buf, msg.pack())
-
-    def test_packet_out_unpack(self):
-        msg = ofp.message.packet_out.unpack(self.sample_packet_out_buf)
-        self.assertEquals(0x12345678, msg.xid)
-        self.assertEquals(0xabcdef01, msg.buffer_id)
-        self.assertEquals(ofp.OFPP_LOCAL, msg.in_port)
-        self.assertEquals(2, len(msg.actions))
-        self.assertEquals(1, msg.actions[0].port)
-        self.assertEquals(2, msg.actions[1].port)
-        self.assertEquals('abc', msg.data)
-
-    sample_packet_in_buf = ''.join([
-        '\x01', '\x0a', # version/type
-        '\x00\x15', # length
-        '\x12\x34\x56\x78', # xid
-        '\xab\xcd\xef\x01', # buffer_id
-        '\x00\x09', # total_len
-        '\xff\xfe', # in_port
-        '\x01', # reason
-        '\x00', # pad
-        'abc', # data
-    ])
-
-    def test_packet_in_pack(self):
-        msg = ofp.message.packet_in(
-            xid=0x12345678,
-            buffer_id=0xabcdef01,
-            total_len=9,
-            in_port=ofp.OFPP_LOCAL,
-            reason=ofp.OFPR_ACTION,
-            data='abc')
-        self.assertEquals(self.sample_packet_in_buf, msg.pack())
-
-    def test_packet_in_unpack(self):
-        msg = ofp.message.packet_in.unpack(self.sample_packet_in_buf)
-        self.assertEquals(0x12345678, msg.xid)
-        self.assertEquals(0xabcdef01, msg.buffer_id)
-        self.assertEquals(9, msg.total_len)
-        self.assertEquals(ofp.OFPP_LOCAL, msg.in_port)
-        self.assertEquals(ofp.OFPR_ACTION, msg.reason)
-        self.assertEquals('abc', msg.data)
-
-    sample_queue_get_config_reply_buf = ''.join([
-        '\x01', '\x15', # version/type
-        '\x00\x50', # length
-        '\x12\x34\x56\x78', # xid
-        '\xff\xfe', # port
-        '\x00\x00\x00\x00\x00\x00', # pad
-        '\x00\x00\x00\x01', # queues[0].queue_id
-        '\x00\x18', # queues[0].len
-        '\x00\x00', # queues[0].pad
-        '\x00\x01', # queues[0].properties[0].type
-        '\x00\x10', # queues[0].properties[0].length
-        '\x00\x00\x00\x00', # queues[0].properties[0].pad
-        '\x00\x05', # queues[0].properties[0].rate
-        '\x00\x00\x00\x00\x00\x00', # queues[0].properties[0].pad2
-        '\x00\x00\x00\x02', # queues[1].queue_id
-        '\x00\x28', # queues[1].len
-        '\x00\x00', # queues[1].pad
-        '\x00\x01', # queues[1].properties[0].type
-        '\x00\x10', # queues[1].properties[0].length
-        '\x00\x00\x00\x00', # queues[1].properties[0].pad
-        '\x00\x06', # queues[1].properties[0].rate
-        '\x00\x00\x00\x00\x00\x00', # queues[1].properties[0].pad2
-        '\x00\x01', # queues[1].properties[1].type
-        '\x00\x10', # queues[1].properties[1].length
-        '\x00\x00\x00\x00', # queues[1].properties[1].pad
-        '\x00\x07', # queues[1].properties[1].rate
-        '\x00\x00\x00\x00\x00\x00', # queues[1].properties[1].pad2
-    ])
-
-    def test_queue_get_config_reply_pack(self):
-        msg = ofp.message.queue_get_config_reply(
-            xid=0x12345678,
-            port=ofp.OFPP_LOCAL,
-            queues=[
-                ofp.packet_queue(queue_id=1, properties=[
-                    ofp.queue_prop_min_rate(rate=5)]),
-                ofp.packet_queue(queue_id=2, properties=[
-                    ofp.queue_prop_min_rate(rate=6),
-                    ofp.queue_prop_min_rate(rate=7)])])
-        self.assertEquals(self.sample_queue_get_config_reply_buf, msg.pack())
-
-    def test_queue_get_config_reply_unpack(self):
-        msg = ofp.message.queue_get_config_reply.unpack(self.sample_queue_get_config_reply_buf)
-        self.assertEquals(ofp.OFPP_LOCAL, msg.port)
-        self.assertEquals(msg.queues[0].queue_id, 1)
-        self.assertEquals(msg.queues[0].properties[0].rate, 5)
-        self.assertEquals(msg.queues[1].queue_id, 2)
-        self.assertEquals(msg.queues[1].properties[0].rate, 6)
-        self.assertEquals(msg.queues[1].properties[1].rate, 7)
+# The majority of the serialization tests are created here using the files in
+# the test_data directory.
+class TestDataFiles(unittest.TestCase):
+    pass
+add_datafiles_tests(TestDataFiles, 'of10/', ofp)
 
 class TestParse(unittest.TestCase):
     def test_parse_header(self):
@@ -867,26 +196,24 @@
         msg = ofp.message.parse_message(buf)
         assert(msg.xid == 0x12345678)
 
-        # Get a list of all message classes
+        # Get a list of all concrete message classes
         test_klasses = [x for x in ofp.message.__dict__.values()
                         if type(x) == type
-                           and issubclass(x, ofp.message.Message)
-                           and x != ofp.message.Message]
+                           and issubclass(x, ofp.message.message)
+                           and not hasattr(x, 'subtypes')]
 
         for klass in test_klasses:
             self.assertIsInstance(ofp.message.parse_message(klass(xid=1).pack()), klass)
 
-class TestUtils(unittest.TestCase):
-    def test_unpack_array(self):
+    def test_parse_unknown_message(self):
         import loxi
-        import loxi.of10.util as util
+        import loxi.of10 as ofp
 
-        a = util.unpack_array(str, 3, "abcdefghi")
-        self.assertEquals(['abc', 'def', 'ghi'], a)
+        buf = "\x01\xfe\x00\x08\x12\x34\x56\x78"
+        msg = ofp.message.parse_message(buf)
+        self.assertIsInstance(msg, ofp.message.message)
 
-        with self.assertRaisesRegexp(loxi.ProtocolError, "invalid array length"):
-            util.unpack_array(str, 3, "abcdefgh")
-
+class TestUtils(unittest.TestCase):
     def test_pretty_wildcards(self):
         self.assertEquals("OFPFW_ALL", ofp.util.pretty_wildcards(ofp.OFPFW_ALL))
         self.assertEquals("0", ofp.util.pretty_wildcards(0))
@@ -910,7 +237,9 @@
         mods = [ofp.action,ofp.message,ofp.common]
         self.klasses = [klass for mod in mods
                               for klass in mod.__dict__.values()
-                              if hasattr(klass, 'show')]
+                              if isinstance(klass, type) and
+                                 issubclass(klass, loxi.OFObject) and
+                                 not hasattr(klass, 'subtypes')]
         self.klasses.sort(key=lambda x: str(x))
 
     def test_serialization(self):
@@ -920,7 +249,22 @@
                 obj = klass()
                 if hasattr(obj, "xid"): obj.xid = 42
                 buf = obj.pack()
-                obj2 = klass.unpack(buf)
+                obj2 = klass.unpack(OFReader(buf))
+                self.assertEquals(obj, obj2)
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+    def test_parse_message(self):
+        expected_failures = []
+        for klass in self.klasses:
+            if not issubclass(klass, ofp.message.message):
+                continue
+            def fn():
+                obj = klass(xid=42)
+                buf = obj.pack()
+                obj2 = ofp.message.parse_message(buf)
                 self.assertEquals(obj, obj2)
             if klass in expected_failures:
                 self.assertRaises(Exception, fn)
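
The of10 test module above now unpacks objects through an OFReader cursor instead of a raw string (see the updated test_match). A minimal usage sketch of that calling convention, assuming the generated loxi package is on PYTHONPATH:

    # Round-trip an of10 match through the OFReader-based unpack API used in
    # the updated tests above.
    import loxi.of10 as ofp
    from loxi.generic_util import OFReader

    match = ofp.match()                         # default field values
    buf = match.pack()                          # serialize
    match2 = ofp.match.unpack(OFReader(buf))    # unpack now takes a reader, not a str
    assert match == match2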
diff --git a/py_gen/tests/of11.py b/py_gen/tests/of11.py
new file mode 100644
index 0000000..7ae6d8a
--- /dev/null
+++ b/py_gen/tests/of11.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+import unittest
+
+try:
+    import loxi
+    import loxi.of11 as ofp
+    from loxi.generic_util import OFReader
+except ImportError:
+    exit("loxi package not found. Try setting PYTHONPATH.")
+
+class TestImports(unittest.TestCase):
+    def test_toplevel(self):
+        import loxi
+        self.assertTrue(hasattr(loxi, "ProtocolError"))
+        self.assertEquals(loxi.version_names[2], "1.1")
+        ofp = loxi.protocol(2)
+        self.assertEquals(ofp.OFP_VERSION, 2)
+        self.assertTrue(hasattr(ofp, "action"))
+        self.assertTrue(hasattr(ofp, "common"))
+        self.assertTrue(hasattr(ofp, "const"))
+        self.assertTrue(hasattr(ofp, "message"))
+
+    def test_version(self):
+        import loxi
+        self.assertTrue(hasattr(loxi.of11, "ProtocolError"))
+        self.assertTrue(hasattr(loxi.of11, "OFP_VERSION"))
+        self.assertEquals(loxi.of11.OFP_VERSION, 2)
+        self.assertTrue(hasattr(loxi.of11, "action"))
+        self.assertTrue(hasattr(loxi.of11, "common"))
+        self.assertTrue(hasattr(loxi.of11, "const"))
+        self.assertTrue(hasattr(loxi.of11, "message"))
+
+class TestAllOF11(unittest.TestCase):
+    """
+    Round-trips every class through serialization/deserialization.
+    Not a replacement for handcoded tests because it only uses the
+    default member values.
+    """
+
+    def setUp(self):
+        mods = [ofp.action,ofp.message,ofp.common]
+        self.klasses = [klass for mod in mods
+                              for klass in mod.__dict__.values()
+                              if isinstance(klass, type) and
+                                 issubclass(klass, loxi.OFObject) and
+                                 not hasattr(klass, 'subtypes')]
+        self.klasses.sort(key=lambda x: str(x))
+
+    def test_serialization(self):
+        expected_failures = []
+        for klass in self.klasses:
+            def fn():
+                obj = klass()
+                if hasattr(obj, "xid"): obj.xid = 42
+                buf = obj.pack()
+                obj2 = klass.unpack(OFReader(buf))
+                self.assertEquals(obj, obj2)
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+    def test_parse_message(self):
+        expected_failures = []
+        for klass in self.klasses:
+            if not issubclass(klass, ofp.message.message):
+                continue
+            def fn():
+                obj = klass(xid=42)
+                buf = obj.pack()
+                obj2 = ofp.message.parse_message(buf)
+                self.assertEquals(obj, obj2)
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+    def test_show(self):
+        expected_failures = []
+        for klass in self.klasses:
+            def fn():
+                obj = klass()
+                if hasattr(obj, "xid"): obj.xid = 42
+                obj.show()
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+if __name__ == '__main__':
+    unittest.main()
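
TestAllOF11, like its of10/of12/of13 counterparts, collects only concrete generated classes by skipping anything that carries a 'subtypes' attribute. A short sketch of that filter as a standalone helper, assuming only the loxi.OFObject base class these tests already rely on:

    # Collect concrete OFObject subclasses from generated modules; classes with
    # a 'subtypes' attribute are treated as abstract parents and skipped, as in
    # the setUp() methods above.
    import loxi
    import loxi.of11 as ofp

    def concrete_classes(mods):
        return sorted((klass for mod in mods
                             for klass in mod.__dict__.values()
                             if isinstance(klass, type) and
                                issubclass(klass, loxi.OFObject) and
                                not hasattr(klass, 'subtypes')),
                      key=str)

    klasses = concrete_classes([ofp.action, ofp.message, ofp.common])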
diff --git a/py_gen/tests/of12.py b/py_gen/tests/of12.py
new file mode 100644
index 0000000..c463c50
--- /dev/null
+++ b/py_gen/tests/of12.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+import unittest
+from testutil import add_datafiles_tests
+
+try:
+    import loxi
+    import loxi.of12 as ofp
+    from loxi.generic_util import OFReader
+except ImportError:
+    exit("loxi package not found. Try setting PYTHONPATH.")
+
+class TestImports(unittest.TestCase):
+    def test_toplevel(self):
+        import loxi
+        self.assertTrue(hasattr(loxi, "ProtocolError"))
+        self.assertEquals(loxi.version_names[3], "1.2")
+        ofp = loxi.protocol(3)
+        self.assertEquals(ofp.OFP_VERSION, 3)
+        self.assertTrue(hasattr(ofp, "action"))
+        self.assertTrue(hasattr(ofp, "common"))
+        self.assertTrue(hasattr(ofp, "const"))
+        self.assertTrue(hasattr(ofp, "message"))
+        self.assertTrue(hasattr(ofp, "oxm"))
+
+    def test_version(self):
+        import loxi
+        self.assertTrue(hasattr(loxi.of12, "ProtocolError"))
+        self.assertTrue(hasattr(loxi.of12, "OFP_VERSION"))
+        self.assertEquals(loxi.of12.OFP_VERSION, 3)
+        self.assertTrue(hasattr(loxi.of12, "action"))
+        self.assertTrue(hasattr(loxi.of12, "common"))
+        self.assertTrue(hasattr(loxi.of12, "const"))
+        self.assertTrue(hasattr(loxi.of12, "message"))
+        self.assertTrue(hasattr(loxi.of12, "oxm"))
+
+# The majority of the serialization tests are created here using the files in
+# the test_data directory.
+class TestDataFiles(unittest.TestCase):
+    pass
+add_datafiles_tests(TestDataFiles, 'of12/', ofp)
+
+class TestAllOF12(unittest.TestCase):
+    """
+    Round-trips every class through serialization/deserialization.
+    Not a replacement for handcoded tests because it only uses the
+    default member values.
+    """
+
+    def setUp(self):
+        mods = [ofp.action,ofp.message,ofp.common,ofp.oxm]
+        self.klasses = [klass for mod in mods
+                              for klass in mod.__dict__.values()
+                              if isinstance(klass, type) and
+                                 issubclass(klass, loxi.OFObject) and
+                                 not hasattr(klass, 'subtypes')]
+        self.klasses.sort(key=lambda x: str(x))
+
+    def test_serialization(self):
+        expected_failures = [
+            ofp.action.set_field, # field defaults to None
+        ]
+        for klass in self.klasses:
+            def fn():
+                obj = klass()
+                if hasattr(obj, "xid"): obj.xid = 42
+                buf = obj.pack()
+                obj2 = klass.unpack(OFReader(buf))
+                self.assertEquals(obj, obj2)
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+    def test_parse_message(self):
+        expected_failures = []
+        for klass in self.klasses:
+            if not issubclass(klass, ofp.message.message):
+                continue
+            def fn():
+                obj = klass(xid=42)
+                buf = obj.pack()
+                obj2 = ofp.message.parse_message(buf)
+                self.assertEquals(obj, obj2)
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+    def test_show(self):
+        expected_failures = []
+        for klass in self.klasses:
+            def fn():
+                obj = klass()
+                if hasattr(obj, "xid"): obj.xid = 42
+                obj.show()
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/py_gen/tests/of13.py b/py_gen/tests/of13.py
new file mode 100644
index 0000000..c5a16b2
--- /dev/null
+++ b/py_gen/tests/of13.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+import unittest
+from testutil import test_serialization
+from testutil import add_datafiles_tests
+
+try:
+    import loxi
+    import loxi.of13 as ofp
+    from loxi.generic_util import OFReader
+except ImportError:
+    exit("loxi package not found. Try setting PYTHONPATH.")
+
+class TestImports(unittest.TestCase):
+    def test_toplevel(self):
+        import loxi
+        self.assertTrue(hasattr(loxi, "ProtocolError"))
+        self.assertEquals(loxi.version_names[4], "1.3")
+        ofp = loxi.protocol(4)
+        self.assertEquals(ofp.OFP_VERSION, 4)
+        self.assertTrue(hasattr(ofp, "action"))
+        self.assertTrue(hasattr(ofp, "common"))
+        self.assertTrue(hasattr(ofp, "const"))
+        self.assertTrue(hasattr(ofp, "message"))
+        self.assertTrue(hasattr(ofp, "oxm"))
+
+    def test_version(self):
+        import loxi
+        self.assertTrue(hasattr(loxi.of13, "ProtocolError"))
+        self.assertTrue(hasattr(loxi.of13, "OFP_VERSION"))
+        self.assertEquals(loxi.of13.OFP_VERSION, 4)
+        self.assertTrue(hasattr(loxi.of13, "action"))
+        self.assertTrue(hasattr(loxi.of13, "common"))
+        self.assertTrue(hasattr(loxi.of13, "const"))
+        self.assertTrue(hasattr(loxi.of13, "message"))
+        self.assertTrue(hasattr(loxi.of13, "oxm"))
+
+# The majority of the serialization tests are created here using the files in
+# the test_data directory.
+class TestDataFiles(unittest.TestCase):
+    pass
+add_datafiles_tests(TestDataFiles, 'of13/', ofp)
+
+class TestAllOF13(unittest.TestCase):
+    """
+    Round-trips every class through serialization/deserialization.
+    Not a replacement for handcoded tests because it only uses the
+    default member values.
+    """
+
+    def setUp(self):
+        mods = [ofp.action,ofp.message,ofp.common,ofp.oxm]
+        self.klasses = [klass for mod in mods
+                              for klass in mod.__dict__.values()
+                              if isinstance(klass, type) and
+                                 issubclass(klass, loxi.OFObject) and
+                                 not hasattr(klass, 'subtypes')]
+        self.klasses.sort(key=lambda x: str(x))
+
+    def test_serialization(self):
+        expected_failures = [
+            ofp.action.set_field, # field defaults to None
+        ]
+        for klass in self.klasses:
+            def fn():
+                obj = klass()
+                if hasattr(obj, "xid"): obj.xid = 42
+                buf = obj.pack()
+                obj2 = klass.unpack(OFReader(buf))
+                self.assertEquals(obj, obj2)
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+    def test_parse_message(self):
+        expected_failures = [
+        ]
+        for klass in self.klasses:
+            if not issubclass(klass, ofp.message.message):
+                continue
+            def fn():
+                obj = klass(xid=42)
+                buf = obj.pack()
+                obj2 = ofp.message.parse_message(buf)
+                self.assertEquals(obj, obj2)
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+    def test_show(self):
+        expected_failures = []
+        for klass in self.klasses:
+            def fn():
+                obj = klass()
+                if hasattr(obj, "xid"): obj.xid = 42
+                obj.show()
+            if klass in expected_failures:
+                self.assertRaises(Exception, fn)
+            else:
+                fn()
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/py_gen/tests/testutil.py b/py_gen/tests/testutil.py
new file mode 100644
index 0000000..f8d5543
--- /dev/null
+++ b/py_gen/tests/testutil.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import sys
+import difflib
+import re
+import os
+import unittest
+import test_data
+from loxi.generic_util import OFReader
+
+# Human-friendly format for binary strings. 8 bytes per line.
+def format_binary(buf):
+    byts = map(ord, buf)
+    lines = [[]]
+    for byt in byts:
+        if len(lines[-1]) == 8:
+            lines.append([])
+        lines[-1].append(byt)
+    return '\n'.join([' '.join(['%02x' % y for y in x]) for x in lines])
+
+def diff(a, b):
+    return '\n'.join(difflib.ndiff(a.splitlines(), b.splitlines()))
+
+# Test serialization / deserialization / reserialization of a sample object.
+# Depends in part on the __eq__ method being correct.
+def test_serialization(obj, buf):
+    packed = obj.pack()
+    if packed != buf:
+        a = format_binary(buf)
+        b = format_binary(packed)
+        raise AssertionError("Serialization of %s failed\nExpected:\n%s\nActual:\n%s\nDiff:\n%s" % \
+            (type(obj).__name__, a, b, diff(a, b)))
+    unpacked = type(obj).unpack(OFReader(buf))
+    if obj != unpacked:
+        a = obj.show()
+        b = unpacked.show()
+        raise AssertionError("Deserialization of %s failed\nExpected:\n%s\nActual:\n%s\nDiff:\n%s" % \
+            (type(obj).__name__, a, b, diff(a, b)))
+    packed = unpacked.pack()
+    if packed != buf:
+        a = format_binary(buf)
+        b = format_binary(packed)
+        raise AssertionError("Reserialization of %s failed\nExpected:\n%s\nActual:\n%s\nDiff:\n%s" % \
+            (type(obj).__name__, a, b, diff(a, b)))
+
+def test_pretty(obj, expected):
+    pretty = obj.show()
+    if expected != pretty:
+        raise AssertionError("Pretty printing of %s failed\nExpected:\n%s\nActual:\n%s\nDiff:\n%s" % \
+            (type(obj).__name__, expected, pretty, diff(expected, pretty)))
+
+# Run test_serialization and possibly test_pretty against the named data file
+def test_datafile(name, ofp):
+    data = test_data.read(name)
+    if not 'python' in data:
+        raise unittest.SkipTest("no python section in datafile")
+    binary = data['binary']
+    python = data['python']
+    obj = eval(python, { 'ofp': ofp })
+    test_serialization(obj, binary)
+    if 'python pretty-printer' in data:
+        test_pretty(obj, data['python pretty-printer'])
+
+# Add test_datafile tests for each datafile matching the given regex
+# The argument 'klass' should be a subclass of TestCase which will have the
+# test_* methods added to it.
+#
+# It would be cleaner to do this by constructing a TestSuite instance and
+# adding individual TestCase objects, but the TestLoader wouldn't pick it
+# up. We could use the load_tests protocol but that isn't available before
+# Python 2.7.
+def add_datafiles_tests(klass, regex, ofp):
+    for filename in test_data.list_files():
+        match = re.match(regex, filename)
+        if not match:
+            continue
+        def make_test(filename):
+            def fn(self):
+                test_datafile(filename, ofp)
+            return fn
+        setattr(klass, 'test_' + os.path.splitext(filename)[0], make_test(filename))
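
testutil.py drives the data-file tests: test_datafile() evaluates a datafile's 'python' section against the ofp module and feeds the result to test_serialization(), while add_datafiles_tests() attaches one generated test method per matching datafile. A hedged sketch of what one such generated test effectively does; the datafile contents here are hypothetical (the test_data format itself is not part of this diff), and the byte string is the OF1.0 hello message used in the of10 tests:

    # What a generated datafile test boils down to, assuming a datafile whose
    # 'binary' section decodes to the 8-byte hello below and whose 'python'
    # section is the constructor expression passed to eval().
    import loxi.of10 as ofp
    from testutil import test_serialization

    binary = "\x01\x00\x00\x08\x12\x34\x56\x78"             # version, OFPT_HELLO, len, xid
    obj = eval("ofp.message.hello(xid=0x12345678)", {'ofp': ofp})
    test_serialization(obj, binary)   # pack, unpack via OFReader, re-pack, compare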
diff --git a/py_gen/util.py b/py_gen/util.py
index 068a234..2a0dc54 100644
--- a/py_gen/util.py
+++ b/py_gen/util.py
@@ -30,50 +30,29 @@
 """
 
 import os
-import of_g
-import loxi_front_end.type_maps as type_maps
+import loxi_globals
+import template_utils
 import loxi_utils.loxi_utils as utils
 
 templates_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
 
 def render_template(out, name, **context):
-    utils.render_template(out, name, [templates_dir], context)
+    template_utils.render_template(out, name, [templates_dir], context)
 
 def render_static(out, name):
-    utils.render_static(out, name, [templates_dir])
-
-def lookup_unified_class(cls, version):
-    unified_class = of_g.unified[cls][version]
-    if "use_version" in unified_class: # deref version ref
-        ref_version = unified_class["use_version"]
-        unified_class = of_g.unified[cls][ref_version]
-    return unified_class
-
-def primary_wire_type(cls, version):
-    if cls in type_maps.stats_reply_list:
-        return type_maps.type_val[("of_stats_reply", version)]
-    elif cls in type_maps.stats_request_list:
-        return type_maps.type_val[("of_stats_request", version)]
-    elif cls in type_maps.flow_mod_list:
-        return type_maps.type_val[("of_flow_mod", version)]
-    elif (cls, version) in type_maps.type_val:
-        return type_maps.type_val[(cls, version)]
-    elif type_maps.message_is_extension(cls, version):
-        return type_maps.type_val[("of_experimenter", version)]
-    elif type_maps.action_is_extension(cls, version):
-        return type_maps.type_val[("of_action_experimenter", version)]
-    elif type_maps.action_id_is_extension(cls, version):
-        return type_maps.type_val[("of_action_id_experimenter", version)]
-    elif type_maps.instruction_is_extension(cls, version):
-        return type_maps.type_val[("of_instruction_experimenter", version)]
-    elif type_maps.queue_prop_is_extension(cls, version):
-        return type_maps.type_val[("of_queue_prop_experimenter", version)]
-    elif type_maps.table_feature_prop_is_extension(cls, version):
-        return type_maps.type_val[("of_table_feature_prop_experimenter", version)]
-    else:
-        raise ValueError
+    template_utils.render_static(out, name, [templates_dir])
 
 def constant_for_value(version, group, value):
-    return (["const." + v["ofp_name"] for k, v in of_g.identifiers.items()
-             if k in of_g.identifiers_by_group[group] and
-                v["values_by_version"].get(version, None) == value] or [value])[0]
+    enums = loxi_globals.ir[version].enums
+    enum = [x for x in enums if x.name == group][0]
+    for name, value2 in enum.values:
+        if value == value2:
+            return "const." + name
+    return repr(value)
+
+def ancestors(ofclass):
+    r = []
+    while ofclass:
+        r.append(ofclass)
+        ofclass = ofclass.superclass
+    return r
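
The rewritten py_gen/util.py resolves constants through the loxi_globals IR instead of the old of_g tables, and adds a small ancestors() helper that walks an IR class's superclass chain. A minimal illustration of ancestors(), using stand-in objects rather than real IR nodes and assuming it runs inside the loxigen tree so py_gen.util and its imports resolve:

    # ancestors() follows .superclass links until it hits None, returning the
    # class itself first and the root last.
    from py_gen import util

    class FakeOFClass(object):
        def __init__(self, name, superclass=None):
            self.name = name
            self.superclass = superclass

    root = FakeOFClass("of_object")
    message = FakeOFClass("of_message", superclass=root)
    hello = FakeOFClass("of_hello", superclass=message)

    assert [c.name for c in util.ancestors(hello)] == ["of_hello", "of_message", "of_object"]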
diff --git a/pyparsing.py b/pyparsing.py
index 9be97dc..ed34209 100644
--- a/pyparsing.py
+++ b/pyparsing.py
@@ -1,3749 +1,3749 @@
-# module pyparsing.py

-#

-# Copyright (c) 2003-2011  Paul T. McGuire

-#

-# Permission is hereby granted, free of charge, to any person obtaining

-# a copy of this software and associated documentation files (the

-# "Software"), to deal in the Software without restriction, including

-# without limitation the rights to use, copy, modify, merge, publish,

-# distribute, sublicense, and/or sell copies of the Software, and to

-# permit persons to whom the Software is furnished to do so, subject to

-# the following conditions:

-#

-# The above copyright notice and this permission notice shall be

-# included in all copies or substantial portions of the Software.

-#

-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,

-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF

-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.

-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY

-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,

-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE

-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

-#

-#from __future__ import generators

-

-__doc__ = \

-"""

-pyparsing module - Classes and methods to define and execute parsing grammars

-

-The pyparsing module is an alternative approach to creating and executing simple grammars,

-vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you

-don't need to learn a new syntax for defining grammars or matching expressions - the parsing module

-provides a library of classes that you use to construct the grammar directly in Python.

-

-Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::

-

-    from pyparsing import Word, alphas

-

-    # define grammar of a greeting

-    greet = Word( alphas ) + "," + Word( alphas ) + "!"

-

-    hello = "Hello, World!"

-    print hello, "->", greet.parseString( hello )

-

-The program outputs the following::

-

-    Hello, World! -> ['Hello', ',', 'World', '!']

-

-The Python representation of the grammar is quite readable, owing to the self-explanatory

-class names, and the use of '+', '|' and '^' operators.

-

-The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an

-object with named attributes.

-

-The pyparsing module handles some of the problems that are typically vexing when writing text parsers:

- - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)

- - quoted strings

- - embedded comments

-"""

-

-__version__ = "1.5.6"

-__versionTime__ = "26 June 2011 10:53"

-__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"

-

-import string

-from weakref import ref as wkref

-import copy

-import sys

-import warnings

-import re

-import sre_constants

-#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )

-

-__all__ = [

-'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',

-'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',

-'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',

-'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',

-'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',

-'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',

-'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',

-'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',

-'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',

-'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',

-'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',

-'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',

-'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',

-'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 

-'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',

-'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',

-'indentedBlock', 'originalTextFor',

-]

-

-"""

-Detect if we are running version 3.X and make appropriate changes

-Robert A. Clark

-"""

-_PY3K = sys.version_info[0] > 2

-if _PY3K:

-    _MAX_INT = sys.maxsize

-    basestring = str

-    unichr = chr

-    _ustr = str

-    alphas = string.ascii_lowercase + string.ascii_uppercase

-else:

-    _MAX_INT = sys.maxint

-    range = xrange

-    set = lambda s : dict( [(c,0) for c in s] )

-    alphas = string.lowercase + string.uppercase

-

-    def _ustr(obj):

-        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries

-           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It

-           then < returns the unicode object | encodes it with the default encoding | ... >.

-        """

-        if isinstance(obj,unicode):

-            return obj

-

-        try:

-            # If this works, then _ustr(obj) has the same behaviour as str(obj), so

-            # it won't break any existing code.

-            return str(obj)

-

-        except UnicodeEncodeError:

-            # The Python docs (http://docs.python.org/ref/customization.html#l2h-182)

-            # state that "The return value must be a string object". However, does a

-            # unicode object (being a subclass of basestring) count as a "string

-            # object"?

-            # If so, then return a unicode object:

-            return unicode(obj)

-            # Else encode it... but how? There are many choices... :)

-            # Replace unprintables with escape codes?

-            #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')

-            # Replace unprintables with question marks?

-            #return unicode(obj).encode(sys.getdefaultencoding(), 'replace')

-            # ...

-            

-    alphas = string.lowercase + string.uppercase

-

-# build list of single arg builtins, tolerant of Python version, that can be used as parse actions

-singleArgBuiltins = []

-import __builtin__

-for fname in "sum len enumerate sorted reversed list tuple set any all".split():

-    try:

-        singleArgBuiltins.append(getattr(__builtin__,fname))

-    except AttributeError:

-        continue

-

-def _xml_escape(data):

-    """Escape &, <, >, ", ', etc. in a string of data."""

-

-    # ampersand must be replaced first

-    from_symbols = '&><"\''

-    to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]

-    for from_,to_ in zip(from_symbols, to_symbols):

-        data = data.replace(from_, to_)

-    return data

-

-class _Constants(object):

-    pass

-

-nums       = string.digits

-hexnums    = nums + "ABCDEFabcdef"

-alphanums  = alphas + nums

-_bslash    = chr(92)

-printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )

-

-class ParseBaseException(Exception):

-    """base exception class for all parsing runtime exceptions"""

-    # Performance tuning: we construct a *lot* of these, so keep this

-    # constructor as small and fast as possible

-    def __init__( self, pstr, loc=0, msg=None, elem=None ):

-        self.loc = loc

-        if msg is None:

-            self.msg = pstr

-            self.pstr = ""

-        else:

-            self.msg = msg

-            self.pstr = pstr

-        self.parserElement = elem

-

-    def __getattr__( self, aname ):

-        """supported attributes by name are:

-            - lineno - returns the line number of the exception text

-            - col - returns the column number of the exception text

-            - line - returns the line containing the exception text

-        """

-        if( aname == "lineno" ):

-            return lineno( self.loc, self.pstr )

-        elif( aname in ("col", "column") ):

-            return col( self.loc, self.pstr )

-        elif( aname == "line" ):

-            return line( self.loc, self.pstr )

-        else:

-            raise AttributeError(aname)

-

-    def __str__( self ):

-        return "%s (at char %d), (line:%d, col:%d)" % \

-                ( self.msg, self.loc, self.lineno, self.column )

-    def __repr__( self ):

-        return _ustr(self)

-    def markInputline( self, markerString = ">!<" ):

-        """Extracts the exception line from the input string, and marks

-           the location of the exception with a special symbol.

-        """

-        line_str = self.line

-        line_column = self.column - 1

-        if markerString:

-            line_str = "".join( [line_str[:line_column],

-                                markerString, line_str[line_column:]])

-        return line_str.strip()

-    def __dir__(self):

-        return "loc msg pstr parserElement lineno col line " \

-               "markInputLine __str__ __repr__".split()

-

-class ParseException(ParseBaseException):

-    """exception thrown when parse expressions don't match class;

-       supported attributes by name are:

-        - lineno - returns the line number of the exception text

-        - col - returns the column number of the exception text

-        - line - returns the line containing the exception text

-    """

-    pass

-
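# Usage sketch for the exception attributes documented above (lineno, col,
# line, markInputline); assumes pyparsing is installed and importable.
from pyparsing import Word, nums, ParseException

integer = Word(nums)
try:
    integer.parseString("abc")                           # no digits, so this raises
except ParseException as err:
    print("line %d, column %d" % (err.lineno, err.col))  # line 1, column 1
    print(err.line)                                      # abc
    print(err.markInputline())                           # >!<abc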

-class ParseFatalException(ParseBaseException):

-    """user-throwable exception thrown when inconsistent parse content

-       is found; stops all parsing immediately"""

-    pass

-

-class ParseSyntaxException(ParseFatalException):

-    """just like C{ParseFatalException}, but thrown internally when an

-       C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because

-       an unbacktrackable syntax error has been found"""

-    def __init__(self, pe):

-        super(ParseSyntaxException, self).__init__(

-                                    pe.pstr, pe.loc, pe.msg, pe.parserElement)

-

-#~ class ReparseException(ParseBaseException):

-    #~ """Experimental class - parse actions can raise this exception to cause

-       #~ pyparsing to reparse the input string:

-        #~ - with a modified input string, and/or

-        #~ - with a modified start location

-       #~ Set the values of the ReparseException in the constructor, and raise the

-       #~ exception in a parse action to cause pyparsing to use the new string/location.

-       #~ Setting the values as None causes no change to be made.

-       #~ """

-    #~ def __init_( self, newstring, restartLoc ):

-        #~ self.newParseText = newstring

-        #~ self.reparseLoc = restartLoc

-

-class RecursiveGrammarException(Exception):

-    """exception thrown by C{validate()} if the grammar could be improperly recursive"""

-    def __init__( self, parseElementList ):

-        self.parseElementTrace = parseElementList

-

-    def __str__( self ):

-        return "RecursiveGrammarException: %s" % self.parseElementTrace

-

-class _ParseResultsWithOffset(object):

-    def __init__(self,p1,p2):

-        self.tup = (p1,p2)

-    def __getitem__(self,i):

-        return self.tup[i]

-    def __repr__(self):

-        return repr(self.tup)

-    def setOffset(self,i):

-        self.tup = (self.tup[0],i)

-

-class ParseResults(object):

-    """Structured parse results, to provide multiple means of access to the parsed data:

-       - as a list (C{len(results)})

-       - by list index (C{results[0], results[1]}, etc.)

-       - by attribute (C{results.<resultsName>})

-       """

-    #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )

-    def __new__(cls, toklist, name=None, asList=True, modal=True ):

-        if isinstance(toklist, cls):

-            return toklist

-        retobj = object.__new__(cls)

-        retobj.__doinit = True

-        return retobj

-

-    # Performance tuning: we construct a *lot* of these, so keep this

-    # constructor as small and fast as possible

-    def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):

-        if self.__doinit:

-            self.__doinit = False

-            self.__name = None

-            self.__parent = None

-            self.__accumNames = {}

-            if isinstance(toklist, list):

-                self.__toklist = toklist[:]

-            else:

-                self.__toklist = [toklist]

-            self.__tokdict = dict()

-

-        if name is not None and name:

-            if not modal:

-                self.__accumNames[name] = 0

-            if isinstance(name,int):

-                name = _ustr(name) # will always return a str, but use _ustr for consistency

-            self.__name = name

-            if not toklist in (None,'',[]):

-                if isinstance(toklist,basestring):

-                    toklist = [ toklist ]

-                if asList:

-                    if isinstance(toklist,ParseResults):

-                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)

-                    else:

-                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)

-                    self[name].__name = name

-                else:

-                    try:

-                        self[name] = toklist[0]

-                    except (KeyError,TypeError,IndexError):

-                        self[name] = toklist

-

-    def __getitem__( self, i ):

-        if isinstance( i, (int,slice) ):

-            return self.__toklist[i]

-        else:

-            if i not in self.__accumNames:

-                return self.__tokdict[i][-1][0]

-            else:

-                return ParseResults([ v[0] for v in self.__tokdict[i] ])

-

-    def __setitem__( self, k, v, isinstance=isinstance ):

-        if isinstance(v,_ParseResultsWithOffset):

-            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]

-            sub = v[0]

-        elif isinstance(k,int):

-            self.__toklist[k] = v

-            sub = v

-        else:

-            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]

-            sub = v

-        if isinstance(sub,ParseResults):

-            sub.__parent = wkref(self)

-

-    def __delitem__( self, i ):

-        if isinstance(i,(int,slice)):

-            mylen = len( self.__toklist )

-            del self.__toklist[i]

-

-            # convert int to slice

-            if isinstance(i, int):

-                if i < 0:

-                    i += mylen

-                i = slice(i, i+1)

-            # get removed indices

-            removed = list(range(*i.indices(mylen)))

-            removed.reverse()

-            # fixup indices in token dictionary

-            for name in self.__tokdict:

-                occurrences = self.__tokdict[name]

-                for j in removed:

-                    for k, (value, position) in enumerate(occurrences):

-                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))

-        else:

-            del self.__tokdict[i]

-

-    def __contains__( self, k ):

-        return k in self.__tokdict

-

-    def __len__( self ): return len( self.__toklist )

-    def __bool__(self): return len( self.__toklist ) > 0

-    __nonzero__ = __bool__

-    def __iter__( self ): return iter( self.__toklist )

-    def __reversed__( self ): return iter( self.__toklist[::-1] )

-    def keys( self ):

-        """Returns all named result keys."""

-        return self.__tokdict.keys()

-

-    def pop( self, index=-1 ):

-        """Removes and returns item at specified index (default=last).

-           Will work with either numeric indices or dict-key indices."""

-        ret = self[index]

-        del self[index]

-        return ret

-

-    def get(self, key, defaultValue=None):

-        """Returns named result matching the given key, or if there is no

-           such name, then returns the given C{defaultValue} or C{None} if no

-           C{defaultValue} is specified."""

-        if key in self:

-            return self[key]

-        else:

-            return defaultValue

-

-    def insert( self, index, insStr ):

-        """Inserts new element at location index in the list of parsed tokens."""

-        self.__toklist.insert(index, insStr)

-        # fixup indices in token dictionary

-        for name in self.__tokdict:

-            occurrences = self.__tokdict[name]

-            for k, (value, position) in enumerate(occurrences):

-                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

-

-    def items( self ):

-        """Returns all named result keys and values as a list of tuples."""

-        return [(k,self[k]) for k in self.__tokdict]

-

-    def values( self ):

-        """Returns all named result values."""

-        return [ v[-1][0] for v in self.__tokdict.values() ]

-

-    def __getattr__( self, name ):

-        if True: #name not in self.__slots__:

-            if name in self.__tokdict:

-                if name not in self.__accumNames:

-                    return self.__tokdict[name][-1][0]

-                else:

-                    return ParseResults([ v[0] for v in self.__tokdict[name] ])

-            else:

-                return ""

-        return None

-

-    def __add__( self, other ):

-        ret = self.copy()

-        ret += other

-        return ret

-

-    def __iadd__( self, other ):

-        if other.__tokdict:

-            offset = len(self.__toklist)

-            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )

-            otheritems = other.__tokdict.items()

-            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )

-                                for (k,vlist) in otheritems for v in vlist]

-            for k,v in otherdictitems:

-                self[k] = v

-                if isinstance(v[0],ParseResults):

-                    v[0].__parent = wkref(self)

-            

-        self.__toklist += other.__toklist

-        self.__accumNames.update( other.__accumNames )

-        return self

-

-    def __radd__(self, other):

-        if isinstance(other,int) and other == 0:

-            return self.copy()

-        

-    def __repr__( self ):

-        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

-

-    def __str__( self ):

-        out = "["

-        sep = ""

-        for i in self.__toklist:

-            if isinstance(i, ParseResults):

-                out += sep + _ustr(i)

-            else:

-                out += sep + repr(i)

-            sep = ", "

-        out += "]"

-        return out

-

-    def _asStringList( self, sep='' ):

-        out = []

-        for item in self.__toklist:

-            if out and sep:

-                out.append(sep)

-            if isinstance( item, ParseResults ):

-                out += item._asStringList()

-            else:

-                out.append( _ustr(item) )

-        return out

-

-    def asList( self ):

-        """Returns the parse results as a nested list of matching tokens, all converted to strings."""

-        out = []

-        for res in self.__toklist:

-            if isinstance(res,ParseResults):

-                out.append( res.asList() )

-            else:

-                out.append( res )

-        return out

-

-    def asDict( self ):

-        """Returns the named parse results as dictionary."""

-        return dict( self.items() )

-

-    def copy( self ):

-        """Returns a new copy of a C{ParseResults} object."""

-        ret = ParseResults( self.__toklist )

-        ret.__tokdict = self.__tokdict.copy()

-        ret.__parent = self.__parent

-        ret.__accumNames.update( self.__accumNames )

-        ret.__name = self.__name

-        return ret

-

-    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):

-        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""

-        nl = "\n"

-        out = []

-        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()

-                                                            for v in vlist ] )

-        nextLevelIndent = indent + "  "

-

-        # collapse out indents if formatting is not desired

-        if not formatted:

-            indent = ""

-            nextLevelIndent = ""

-            nl = ""

-

-        selfTag = None

-        if doctag is not None:

-            selfTag = doctag

-        else:

-            if self.__name:

-                selfTag = self.__name

-

-        if not selfTag:

-            if namedItemsOnly:

-                return ""

-            else:

-                selfTag = "ITEM"

-

-        out += [ nl, indent, "<", selfTag, ">" ]

-

-        worklist = self.__toklist

-        for i,res in enumerate(worklist):

-            if isinstance(res,ParseResults):

-                if i in namedItems:

-                    out += [ res.asXML(namedItems[i],

-                                        namedItemsOnly and doctag is None,

-                                        nextLevelIndent,

-                                        formatted)]

-                else:

-                    out += [ res.asXML(None,

-                                        namedItemsOnly and doctag is None,

-                                        nextLevelIndent,

-                                        formatted)]

-            else:

-                # individual token, see if there is a name for it

-                resTag = None

-                if i in namedItems:

-                    resTag = namedItems[i]

-                if not resTag:

-                    if namedItemsOnly:

-                        continue

-                    else:

-                        resTag = "ITEM"

-                xmlBodyText = _xml_escape(_ustr(res))

-                out += [ nl, nextLevelIndent, "<", resTag, ">",

-                                                xmlBodyText,

-                                                "</", resTag, ">" ]

-

-        out += [ nl, indent, "</", selfTag, ">" ]

-        return "".join(out)

-

-    def __lookup(self,sub):

-        for k,vlist in self.__tokdict.items():

-            for v,loc in vlist:

-                if sub is v:

-                    return k

-        return None

-

-    def getName(self):

-        """Returns the results name for this token expression."""

-        if self.__name:

-            return self.__name

-        elif self.__parent:

-            par = self.__parent()

-            if par:

-                return par.__lookup(self)

-            else:

-                return None

-        elif (len(self) == 1 and

-               len(self.__tokdict) == 1 and

-               self.__tokdict.values()[0][0][1] in (0,-1)):

-            return self.__tokdict.keys()[0]

-        else:

-            return None

-

-    def dump(self,indent='',depth=0):

-        """Diagnostic method for listing out the contents of a C{ParseResults}.

-           Accepts an optional C{indent} argument so that this string can be embedded

-           in a nested display of other data."""

-        out = []

-        out.append( indent+_ustr(self.asList()) )

-        keys = self.items()

-        keys.sort()

-        for k,v in keys:

-            if out:

-                out.append('\n')

-            out.append( "%s%s- %s: " % (indent,('  '*depth), k) )

-            if isinstance(v,ParseResults):

-                if v.keys():

-                    out.append( v.dump(indent,depth+1) )

-                else:

-                    out.append(_ustr(v))

-            else:

-                out.append(_ustr(v))

-        return "".join(out)

-

-    # add support for pickle protocol

-    def __getstate__(self):

-        return ( self.__toklist,

-                 ( self.__tokdict.copy(),

-                   self.__parent is not None and self.__parent() or None,

-                   self.__accumNames,

-                   self.__name ) )

-

-    def __setstate__(self,state):

-        self.__toklist = state[0]

-        (self.__tokdict,

-         par,

-         inAccumNames,

-         self.__name) = state[1]

-        self.__accumNames = {}

-        self.__accumNames.update(inAccumNames)

-        if par is not None:

-            self.__parent = wkref(par)

-        else:

-            self.__parent = None

-

-    def __dir__(self):

-        return dir(super(ParseResults,self)) + self.keys()

-
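# Usage sketch for the ParseResults access styles described in the class
# docstring above; assumes pyparsing is installed and importable.
from pyparsing import Word, alphas, nums

record = Word(alphas)("first") + Word(alphas)("last") + Word(nums)("zip")
result = record.parseString("Jane Doe 12345")

print(len(result))         # 3
print(result[0])           # Jane
print(result.last)         # Doe   (attribute access via the results name)
print(result.asList())     # ['Jane', 'Doe', '12345']
print(result.asDict())     # {'first': 'Jane', 'last': 'Doe', 'zip': '12345'} (key order may vary)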

-def col (loc,strg):

-    """Returns current column within a string, counting newlines as line separators.

-   The first column is number 1.

-

-   Note: the default parsing behavior is to expand tabs in the input string

-   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information

-   on parsing strings containing <TAB>s, and suggested methods to maintain a

-   consistent view of the parsed string, the parse location, and line and column

-   positions within the parsed string.

-   """

-    return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)

-

-def lineno(loc,strg):

-    """Returns current line number within a string, counting newlines as line separators.

-   The first line is number 1.

-

-   Note: the default parsing behavior is to expand tabs in the input string

-   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information

-   on parsing strings containing <TAB>s, and suggested methods to maintain a

-   consistent view of the parsed string, the parse location, and line and column

-   positions within the parsed string.

-   """

-    return strg.count("\n",0,loc) + 1

-

-def line( loc, strg ):

-    """Returns the line of text containing loc within a string, counting newlines as line separators.

-       """

-    lastCR = strg.rfind("\n", 0, loc)

-    nextCR = strg.find("\n", loc)

-    if nextCR >= 0:

-        return strg[lastCR+1:nextCR]

-    else:

-        return strg[lastCR+1:]

-
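# Quick sketch of the position helpers defined above; all three take
# (loc, string) and are 1-based. Assumes pyparsing is installed and importable.
from pyparsing import col, lineno, line

text = "abc\ndef"
loc = text.index("e")          # character offset 5
print(lineno(loc, text))       # 2
print(col(loc, text))          # 2
print(line(loc, text))         # def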

-def _defaultStartDebugAction( instring, loc, expr ):

-    print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))

-

-def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):

-    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))

-

-def _defaultExceptionDebugAction( instring, loc, expr, exc ):

-    print ("Exception raised:" + _ustr(exc))

-

-def nullDebugAction(*args):

-    """'Do-nothing' debug action, to suppress debugging output during parsing."""

-    pass

-

-'decorator to trim function calls to match the arity of the target'

-if not _PY3K:

-    def _trim_arity(func, maxargs=2):

-        limit = [0]

-        def wrapper(*args):

-            while 1:

-                try:

-                    return func(*args[limit[0]:])

-                except TypeError:

-                    if limit[0] <= maxargs:

-                        limit[0] += 1

-                        continue

-                    raise

-        return wrapper

-else:

-    def _trim_arity(func, maxargs=2):

-        limit = maxargs

-        def wrapper(*args):

-            #~ nonlocal limit

-            while 1:

-                try:

-                    return func(*args[limit:])

-                except TypeError:

-                    if limit:

-                        limit -= 1

-                        continue

-                    raise

-        return wrapper

-    

-class ParserElement(object):

-    """Abstract base level parser element class."""

-    DEFAULT_WHITE_CHARS = " \n\t\r"

-    verbose_stacktrace = False

-

-    def setDefaultWhitespaceChars( chars ):

-        """Overrides the default whitespace chars

-        """

-        ParserElement.DEFAULT_WHITE_CHARS = chars

-    setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)

-
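# Sketch of overriding the default whitespace set, as documented above;
# removing "\n" keeps newlines significant. Assumes pyparsing is installed
# and importable; note the call must come before the elements are built.
from pyparsing import ParserElement, Word, alphas, OneOrMore

ParserElement.setDefaultWhitespaceChars(" \t")
words = OneOrMore(Word(alphas))
print(words.parseString("alpha beta\ngamma").asList())   # ['alpha', 'beta']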

-    def __init__( self, savelist=False ):

-        self.parseAction = list()

-        self.failAction = None

-        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall

-        self.strRepr = None

-        self.resultsName = None

-        self.saveAsList = savelist

-        self.skipWhitespace = True

-        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS

-        self.copyDefaultWhiteChars = True

-        self.mayReturnEmpty = False # used when checking for left-recursion

-        self.keepTabs = False

-        self.ignoreExprs = list()

-        self.debug = False

-        self.streamlined = False

-        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index

-        self.errmsg = ""

-        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)

-        self.debugActions = ( None, None, None ) #custom debug actions

-        self.re = None

-        self.callPreparse = True # used to avoid redundant calls to preParse

-        self.callDuringTry = False

-

-    def copy( self ):

-        """Make a copy of this C{ParserElement}.  Useful for defining different parse actions

-           for the same parsing pattern, using copies of the original parse element."""

-        cpy = copy.copy( self )

-        cpy.parseAction = self.parseAction[:]

-        cpy.ignoreExprs = self.ignoreExprs[:]

-        if self.copyDefaultWhiteChars:

-            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS

-        return cpy

-

-    def setName( self, name ):

-        """Define name for this expression, for use in debugging."""

-        self.name = name

-        self.errmsg = "Expected " + self.name

-        if hasattr(self,"exception"):

-            self.exception.msg = self.errmsg

-        return self

-

-    def setResultsName( self, name, listAllMatches=False ):

-        """Define name for referencing matching tokens as a nested attribute

-           of the returned parse results.

-           NOTE: this returns a *copy* of the original C{ParserElement} object;

-           this is so that the client can define a basic element, such as an

-           integer, and reference it in multiple places with different names.

-           

-           You can also set results names using the abbreviated syntax,

-           C{expr("name")} in place of C{expr.setResultsName("name")} - 

-           see L{I{__call__}<__call__>}.

-        """

-        newself = self.copy()

-        if name.endswith("*"):

-            name = name[:-1]

-            listAllMatches=True

-        newself.resultsName = name

-        newself.modalResults = not listAllMatches

-        return newself

-
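# Sketch of the two equivalent spellings mentioned in the docstring above:
# setResultsName("name") and the expr("name") shorthand. Assumes pyparsing is
# installed and importable.
from pyparsing import Word, alphas, nums

longhand  = Word(alphas).setResultsName("name") + Word(nums).setResultsName("age")
shorthand = Word(alphas)("name") + Word(nums)("age")

for expr in (longhand, shorthand):
    result = expr.parseString("Bob 42")
    print("%s is %s" % (result.name, result.age))   # Bob is 42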

-    def setBreak(self,breakFlag = True):

-        """Method to invoke the Python pdb debugger when this element is

-           about to be parsed. Set C{breakFlag} to True to enable, False to

-           disable.

-        """

-        if breakFlag:

-            _parseMethod = self._parse

-            def breaker(instring, loc, doActions=True, callPreParse=True):

-                import pdb

-                pdb.set_trace()

-                return _parseMethod( instring, loc, doActions, callPreParse )

-            breaker._originalParseMethod = _parseMethod

-            self._parse = breaker

-        else:

-            if hasattr(self._parse,"_originalParseMethod"):

-                self._parse = self._parse._originalParseMethod

-        return self

-

-    def setParseAction( self, *fns, **kwargs ):

-        """Define action to perform when successfully matching parse element definition.

-           Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},

-           C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:

-            - s   = the original string being parsed (see note below)

-            - loc = the location of the matching substring

-            - toks = a list of the matched tokens, packaged as a ParseResults object

-           If the functions in fns modify the tokens, they can return them as the return

-           value from fn, and the modified list of tokens will replace the original.

-           Otherwise, fn does not need to return any value.

-

-           Note: the default parsing behavior is to expand tabs in the input string

-           before starting the parsing process.  See L{I{parseString}<parseString>} for more information

-           on parsing strings containing <TAB>s, and suggested methods to maintain a

-           consistent view of the parsed string, the parse location, and line and column

-           positions within the parsed string.

-           """

-        self.parseAction = list(map(_trim_arity, list(fns)))

-        self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])

-        return self

-
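# Sketch of a parse action, per the docstring above: the callable may take
# (s, loc, toks), (loc, toks), (toks), or no arguments, and a returned value
# replaces the matched tokens. Assumes pyparsing is installed and importable.
from pyparsing import Word, nums

integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
print(integer.parseString("42")[0] + 1)   # 43 -- the token is now a real int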

-    def addParseAction( self, *fns, **kwargs ):

-        """Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""

-        self.parseAction += list(map(_trim_arity, list(fns)))

-        self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])

-        return self

-

-    def setFailAction( self, fn ):

-        """Define action to perform if parsing fails at this expression.

-           Fail action fn is a callable function that takes the arguments

-           C{fn(s,loc,expr,err)} where:

-            - s = string being parsed

-            - loc = location where expression match was attempted and failed

-            - expr = the parse expression that failed

-            - err = the exception thrown

-           The function returns no value.  It may throw C{ParseFatalException}

-           if it is desired to stop parsing immediately."""

-        self.failAction = fn

-        return self

-

-    def _skipIgnorables( self, instring, loc ):

-        exprsFound = True

-        while exprsFound:

-            exprsFound = False

-            for e in self.ignoreExprs:

-                try:

-                    while 1:

-                        loc,dummy = e._parse( instring, loc )

-                        exprsFound = True

-                except ParseException:

-                    pass

-        return loc

-

-    def preParse( self, instring, loc ):

-        if self.ignoreExprs:

-            loc = self._skipIgnorables( instring, loc )

-

-        if self.skipWhitespace:

-            wt = self.whiteChars

-            instrlen = len(instring)

-            while loc < instrlen and instring[loc] in wt:

-                loc += 1

-

-        return loc

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        return loc, []

-

-    def postParse( self, instring, loc, tokenlist ):

-        return tokenlist

-

-    #~ @profile

-    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):

-        debugging = ( self.debug ) #and doActions )

-

-        if debugging or self.failAction:

-            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))

-            if (self.debugActions[0] ):

-                self.debugActions[0]( instring, loc, self )

-            if callPreParse and self.callPreparse:

-                preloc = self.preParse( instring, loc )

-            else:

-                preloc = loc

-            tokensStart = preloc

-            try:

-                try:

-                    loc,tokens = self.parseImpl( instring, preloc, doActions )

-                except IndexError:

-                    raise ParseException( instring, len(instring), self.errmsg, self )

-            except ParseBaseException:

-                #~ print ("Exception raised:", err)

-                err = None

-                if self.debugActions[2]:

-                    err = sys.exc_info()[1]

-                    self.debugActions[2]( instring, tokensStart, self, err )

-                if self.failAction:

-                    if err is None:

-                        err = sys.exc_info()[1]

-                    self.failAction( instring, tokensStart, self, err )

-                raise

-        else:

-            if callPreParse and self.callPreparse:

-                preloc = self.preParse( instring, loc )

-            else:

-                preloc = loc

-            tokensStart = preloc

-            if self.mayIndexError or loc >= len(instring):

-                try:

-                    loc,tokens = self.parseImpl( instring, preloc, doActions )

-                except IndexError:

-                    raise ParseException( instring, len(instring), self.errmsg, self )

-            else:

-                loc,tokens = self.parseImpl( instring, preloc, doActions )

-

-        tokens = self.postParse( instring, loc, tokens )

-

-        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )

-        if self.parseAction and (doActions or self.callDuringTry):

-            if debugging:

-                try:

-                    for fn in self.parseAction:

-                        tokens = fn( instring, tokensStart, retTokens )

-                        if tokens is not None:

-                            retTokens = ParseResults( tokens,

-                                                      self.resultsName,

-                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),

-                                                      modal=self.modalResults )

-                except ParseBaseException:

-                    #~ print "Exception raised in user parse action:", err

-                    if (self.debugActions[2] ):

-                        err = sys.exc_info()[1]

-                        self.debugActions[2]( instring, tokensStart, self, err )

-                    raise

-            else:

-                for fn in self.parseAction:

-                    tokens = fn( instring, tokensStart, retTokens )

-                    if tokens is not None:

-                        retTokens = ParseResults( tokens,

-                                                  self.resultsName,

-                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),

-                                                  modal=self.modalResults )

-

-        if debugging:

-            #~ print ("Matched",self,"->",retTokens.asList())

-            if (self.debugActions[1] ):

-                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

-

-        return loc, retTokens

-

-    def tryParse( self, instring, loc ):

-        try:

-            return self._parse( instring, loc, doActions=False )[0]

-        except ParseFatalException:

-            raise ParseException( instring, loc, self.errmsg, self)

-

-    # this method gets repeatedly called during backtracking with the same arguments -

-    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression

-    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):

-        lookup = (self,instring,loc,callPreParse,doActions)

-        if lookup in ParserElement._exprArgCache:

-            value = ParserElement._exprArgCache[ lookup ]

-            if isinstance(value, Exception):

-                raise value

-            return (value[0],value[1].copy())

-        else:

-            try:

-                value = self._parseNoCache( instring, loc, doActions, callPreParse )

-                ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())

-                return value

-            except ParseBaseException:

-                pe = sys.exc_info()[1]

-                ParserElement._exprArgCache[ lookup ] = pe

-                raise

-

-    _parse = _parseNoCache

-

-    # argument cache for optimizing repeated calls when backtracking through recursive expressions

-    _exprArgCache = {}

-    def resetCache():

-        ParserElement._exprArgCache.clear()

-    resetCache = staticmethod(resetCache)

-

-    _packratEnabled = False

-    def enablePackrat():

-        """Enables "packrat" parsing, which adds memoizing to the parsing logic.

-           Repeated parse attempts at the same string location (which happens

-           often in many complex grammars) can immediately return a cached value,

-           instead of re-executing parsing/validating code.  Memoizing covers

-           both valid results and parsing exceptions.

-

-           This speedup may break existing programs that use parse actions that

-           have side-effects.  For this reason, packrat parsing is disabled when

-           you first import pyparsing.  To activate the packrat feature, your

-           program must call the class method C{ParserElement.enablePackrat()}.  If

-           your program uses C{psyco} to "compile as you go", you must call

-           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,

-           Python will crash.  For best results, call C{enablePackrat()} immediately

-           after importing pyparsing.

-        """

-        if not ParserElement._packratEnabled:

-            ParserElement._packratEnabled = True

-            ParserElement._parse = ParserElement._parseCache

-    enablePackrat = staticmethod(enablePackrat)

-
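# Sketch of enabling packrat memoization as described above; call it once,
# right after import and before building grammars that backtrack heavily.
# Assumes pyparsing is installed and importable.
import pyparsing

pyparsing.ParserElement.enablePackrat()
# ... define and use grammars as usual; results are unchanged, but repeated
# parse attempts at the same input location are served from the cache.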

-    def parseString( self, instring, parseAll=False ):

-        """Execute the parse expression with the given string.

-           This is the main interface to the client code, once the complete

-           expression has been built.

-

-           If you want the grammar to require that the entire input string be

-           successfully parsed, then set C{parseAll} to True (equivalent to ending

-           the grammar with C{StringEnd()}).

-

-           Note: C{parseString} implicitly calls C{expandtabs()} on the input string,

-           in order to report proper column numbers in parse actions.

-           If the input string contains tabs and

-           the grammar uses parse actions that use the C{loc} argument to index into the

-           string being parsed, you can ensure you have a consistent view of the input

-           string by:

-            - calling C{parseWithTabs} on your grammar before calling C{parseString}

-              (see L{I{parseWithTabs}<parseWithTabs>})

-            - define your parse action using the full C{(s,loc,toks)} signature, and

-              reference the input string using the parse action's C{s} argument

-            - explicitly expand the tabs in your input string before calling

-              C{parseString}

-        """

-        ParserElement.resetCache()

-        if not self.streamlined:

-            self.streamline()

-            #~ self.saveAsList = True

-        for e in self.ignoreExprs:

-            e.streamline()

-        if not self.keepTabs:

-            instring = instring.expandtabs()

-        try:

-            loc, tokens = self._parse( instring, 0 )

-            if parseAll:

-                loc = self.preParse( instring, loc )

-                se = Empty() + StringEnd()

-                se._parse( instring, loc )

-        except ParseBaseException:

-            if ParserElement.verbose_stacktrace:

-                raise

-            else:

-                # catch and re-raise exception from here, clears out pyparsing internal stack trace

-                exc = sys.exc_info()[1]

-                raise exc

-        else:

-            return tokens

-
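# Sketch of parseString with and without parseAll, as documented above;
# assumes pyparsing is installed and importable.
from pyparsing import Word, nums, ParseException

integer = Word(nums)
print(integer.parseString("123 and more").asList())   # ['123'] -- trailing text ignored
try:
    integer.parseString("123 and more", parseAll=True)
except ParseException:
    print("parseAll=True requires the whole input to match")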

-    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):

-        """Scan the input string for expression matches.  Each match will return the

-           matching tokens, start location, and end location.  May be called with optional

-           C{maxMatches} argument, to clip scanning after 'n' matches are found.  If

-           C{overlap} is specified, then overlapping matches will be reported.

-

-           Note that the start and end locations are reported relative to the string

-           being parsed.  See L{I{parseString}<parseString>} for more information on parsing

-           strings with embedded tabs."""

-        if not self.streamlined:

-            self.streamline()

-        for e in self.ignoreExprs:

-            e.streamline()

-

-        if not self.keepTabs:

-            instring = _ustr(instring).expandtabs()

-        instrlen = len(instring)

-        loc = 0

-        preparseFn = self.preParse

-        parseFn = self._parse

-        ParserElement.resetCache()

-        matches = 0

-        try:

-            while loc <= instrlen and matches < maxMatches:

-                try:

-                    preloc = preparseFn( instring, loc )

-                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )

-                except ParseException:

-                    loc = preloc+1

-                else:

-                    if nextLoc > loc:

-                        matches += 1

-                        yield tokens, preloc, nextLoc

-                        if overlap:

-                            nextloc = preparseFn( instring, loc )

-                            if nextloc > loc:

-                                loc = nextLoc

-                            else:

-                                loc += 1

-                        else:

-                            loc = nextLoc

-                    else:

-                        loc = preloc+1

-        except ParseBaseException:

-            if ParserElement.verbose_stacktrace:

-                raise

-            else:

-                # catch and re-raise exception from here, clears out pyparsing internal stack trace

-                exc = sys.exc_info()[1]

-                raise exc

-
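# Sketch of scanString, which yields (tokens, start, end) for every match in
# the input, as described above; assumes pyparsing is installed and importable.
from pyparsing import Word, nums

for tokens, start, end in Word(nums).scanString("a1 bb22 ccc333"):
    print("%s at [%d:%d]" % (tokens[0], start, end))
# 1 at [1:2]
# 22 at [5:7]
# 333 at [11:14]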

-    def transformString( self, instring ):

-        """Extension to C{scanString}, to modify matching text with modified tokens that may

-           be returned from a parse action.  To use C{transformString}, define a grammar and

-           attach a parse action to it that modifies the returned token list.

-           Invoking C{transformString()} on a target string will then scan for matches,

-           and replace the matched text patterns according to the logic in the parse

-           action.  C{transformString()} returns the resulting transformed string."""

-        out = []

-        lastE = 0

-        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to

-        # keep string locs straight between transformString and scanString

-        self.keepTabs = True

-        try:

-            for t,s,e in self.scanString( instring ):

-                out.append( instring[lastE:s] )

-                if t:

-                    if isinstance(t,ParseResults):

-                        out += t.asList()

-                    elif isinstance(t,list):

-                        out += t

-                    else:

-                        out.append(t)

-                lastE = e

-            out.append(instring[lastE:])

-            out = [o for o in out if o]

-            return "".join(map(_ustr,_flatten(out)))

-        except ParseBaseException:

-            if ParserElement.verbose_stacktrace:

-                raise

-            else:

-                # catch and re-raise exception from here, clears out pyparsing internal stack trace

-                exc = sys.exc_info()[1]

-                raise exc

-
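# Sketch of transformString, which rewrites each match with the tokens
# returned by a parse action, per the docstring above; assumes pyparsing is
# installed and importable.
from pyparsing import Word, alphas

shout = Word(alphas).setParseAction(lambda toks: toks[0].upper())
print(shout.transformString("hello world 123"))   # HELLO WORLD 123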

-    def searchString( self, instring, maxMatches=_MAX_INT ):

-        """Another extension to C{scanString}, simplifying the access to the tokens found

-           to match the given parse expression.  May be called with optional

-           C{maxMatches} argument, to clip searching after 'n' matches are found.

-        """

-        try:

-            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])

-        except ParseBaseException:

-            if ParserElement.verbose_stacktrace:

-                raise

-            else:

-                # catch and re-raise exception from here, clears out pyparsing internal stack trace

-                exc = sys.exc_info()[1]

-                raise exc

-

-    def __add__(self, other ):

-        """Implementation of + operator - returns And"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return And( [ self, other ] )

-

-    def __radd__(self, other ):

-        """Implementation of + operator when left operand is not a C{ParserElement}"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return other + self

-

-    def __sub__(self, other):

-        """Implementation of - operator, returns C{And} with error stop"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return And( [ self, And._ErrorStop(), other ] )

-

-    def __rsub__(self, other ):

-        """Implementation of - operator when left operand is not a C{ParserElement}"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return other - self

-

-    def __mul__(self,other):

-        """Implementation of * operator, allows use of C{expr * 3} in place of

-           C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer

-           tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples

-           may also include C{None} as in:

-            - C{expr*(n,None)} or C{expr*(n,)} is equivalent

-              to C{expr*n + ZeroOrMore(expr)}

-              (read as "at least n instances of C{expr}")

-            - C{expr*(None,n)} is equivalent to C{expr*(0,n)}

-              (read as "0 to n instances of C{expr}")

-            - C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)}

-            - C{expr*(1,None)} is equivalent to C{OneOrMore(expr)}

-

-           Note that C{expr*(None,n)} does not raise an exception if

-           more than n exprs exist in the input stream; that is,

-           C{expr*(None,n)} does not enforce a maximum number of expr

-           occurrences.  If this behavior is desired, then write

-           C{expr*(None,n) + ~expr}

-

-        """

-        if isinstance(other,int):

-            minElements, optElements = other,0

-        elif isinstance(other,tuple):

-            other = (other + (None, None))[:2]

-            if other[0] is None:

-                other = (0, other[1])

-            if isinstance(other[0],int) and other[1] is None:

-                if other[0] == 0:

-                    return ZeroOrMore(self)

-                if other[0] == 1:

-                    return OneOrMore(self)

-                else:

-                    return self*other[0] + ZeroOrMore(self)

-            elif isinstance(other[0],int) and isinstance(other[1],int):

-                minElements, optElements = other

-                optElements -= minElements

-            else:

-                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))

-        else:

-            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))

-

-        if minElements < 0:

-            raise ValueError("cannot multiply ParserElement by negative value")

-        if optElements < 0:

-            raise ValueError("second tuple value must be greater or equal to first tuple value")

-        if minElements == optElements == 0:

-            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

-

-        if (optElements):

-            def makeOptionalList(n):

-                if n>1:

-                    return Optional(self + makeOptionalList(n-1))

-                else:

-                    return Optional(self)

-            if minElements:

-                if minElements == 1:

-                    ret = self + makeOptionalList(optElements)

-                else:

-                    ret = And([self]*minElements) + makeOptionalList(optElements)

-            else:

-                ret = makeOptionalList(optElements)

-        else:

-            if minElements == 1:

-                ret = self

-            else:

-                ret = And([self]*minElements)

-        return ret

-
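# Sketch of the * multiplier forms described in the __mul__ docstring above;
# assumes pyparsing is installed and importable.
from pyparsing import Word, nums, ParseException

digit = Word(nums, exact=1)
print((digit * 3).parseString("123").asList())          # ['1', '2', '3']
print((digit * (2, 4)).parseString("12345").asList())   # ['1', '2', '3', '4'] -- at most 4 consumed
try:
    (digit * 3).parseString("12")
except ParseException:
    print("exactly 3 repetitions required here")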

-    def __rmul__(self, other):

-        return self.__mul__(other)

-

-    def __or__(self, other ):

-        """Implementation of | operator - returns C{MatchFirst}"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return MatchFirst( [ self, other ] )

-

-    def __ror__(self, other ):

-        """Implementation of | operator when left operand is not a C{ParserElement}"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return other | self

-

-    def __xor__(self, other ):

-        """Implementation of ^ operator - returns C{Or}"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return Or( [ self, other ] )

-

-    def __rxor__(self, other ):

-        """Implementation of ^ operator when left operand is not a C{ParserElement}"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return other ^ self

-

-    def __and__(self, other ):

-        """Implementation of & operator - returns C{Each}"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return Each( [ self, other ] )

-

-    def __rand__(self, other ):

-        """Implementation of & operator when left operand is not a C{ParserElement}"""

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        if not isinstance( other, ParserElement ):

-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

-                    SyntaxWarning, stacklevel=2)

-            return None

-        return other & self

-

-    def __invert__( self ):

-        """Implementation of ~ operator - returns C{NotAny}"""

-        return NotAny( self )

-

-    def __call__(self, name):

-        """Shortcut for C{setResultsName}, with C{listAllMatches=default}::

-             userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")

-           could be written as::

-             userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")

-             

-           If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be

-           passed as C{True}.

-           """

-        return self.setResultsName(name)

-

-    def suppress( self ):

-        """Suppresses the output of this C{ParserElement}; useful to keep punctuation from

-           cluttering up returned output.

-        """

-        return Suppress( self )

-

-    def leaveWhitespace( self ):

-        """Disables the skipping of whitespace before matching the characters in the

-           C{ParserElement}'s defined pattern.  This is normally only used internally by

-           the pyparsing module, but may be needed in some whitespace-sensitive grammars.

-        """

-        self.skipWhitespace = False

-        return self

-

-    def setWhitespaceChars( self, chars ):

-        """Overrides the default whitespace chars

-        """

-        self.skipWhitespace = True

-        self.whiteChars = chars

-        self.copyDefaultWhiteChars = False

-        return self

-

-    def parseWithTabs( self ):

-        """Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.

-           Must be called before C{parseString} when the input grammar contains elements that

-           match C{<TAB>} characters."""

-        self.keepTabs = True

-        return self

-

-    def ignore( self, other ):

-        """Define expression to be ignored (e.g., comments) while doing pattern

-           matching; may be called repeatedly, to define multiple comment or other

-           ignorable patterns.

-        """

-        if isinstance( other, Suppress ):

-            if other not in self.ignoreExprs:

-                self.ignoreExprs.append( other.copy() )

-        else:

-            self.ignoreExprs.append( Suppress( other.copy() ) )

-        return self

-
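# Sketch of ignore(), which skips comment-like expressions while matching, as
# documented above; assumes pyparsing is installed and importable.
from pyparsing import Word, alphas, OneOrMore, pythonStyleComment

words = OneOrMore(Word(alphas))
words.ignore(pythonStyleComment)
print(words.parseString("alpha  # a comment\nbeta").asList())   # ['alpha', 'beta']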

-    def setDebugActions( self, startAction, successAction, exceptionAction ):

-        """Enable display of debugging messages while doing pattern matching."""

-        self.debugActions = (startAction or _defaultStartDebugAction,

-                             successAction or _defaultSuccessDebugAction,

-                             exceptionAction or _defaultExceptionDebugAction)

-        self.debug = True

-        return self

-

-    def setDebug( self, flag=True ):

-        """Enable display of debugging messages while doing pattern matching.

-           Set C{flag} to True to enable, False to disable."""

-        if flag:

-            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )

-        else:

-            self.debug = False

-        return self

-

-    def __str__( self ):

-        return self.name

-

-    def __repr__( self ):

-        return _ustr(self)

-

-    def streamline( self ):

-        self.streamlined = True

-        self.strRepr = None

-        return self

-

-    def checkRecursion( self, parseElementList ):

-        pass

-

-    def validate( self, validateTrace=[] ):

-        """Check defined expressions for valid structure, check for infinite recursive definitions."""

-        self.checkRecursion( [] )

-

-    def parseFile( self, file_or_filename, parseAll=False ):

-        """Execute the parse expression on the given file or filename.

-           If a filename is specified (instead of a file object),

-           the entire file is opened, read, and closed before parsing.

-        """

-        try:

-            file_contents = file_or_filename.read()

-        except AttributeError:

-            f = open(file_or_filename, "rb")

-            file_contents = f.read()

-            f.close()

-        try:

-            return self.parseString(file_contents, parseAll)

-        except ParseBaseException:

-            # catch and re-raise exception from here, clears out pyparsing internal stack trace

-            exc = sys.exc_info()[1]

-            raise exc

-

-    def getException(self):

-        return ParseException("",0,self.errmsg,self)

-

-    def __getattr__(self,aname):

-        if aname == "myException":

-            self.myException = ret = self.getException()

-            return ret

-        else:

-            raise AttributeError("no such attribute " + aname)

-

-    def __eq__(self,other):

-        if isinstance(other, ParserElement):

-            return self is other or self.__dict__ == other.__dict__

-        elif isinstance(other, basestring):

-            try:

-                self.parseString(_ustr(other), parseAll=True)

-                return True

-            except ParseBaseException:

-                return False

-        else:

-            return super(ParserElement,self)==other

-

-    def __ne__(self,other):

-        return not (self == other)

-

-    def __hash__(self):

-        return hash(id(self))

-

-    def __req__(self,other):

-        return self == other

-

-    def __rne__(self,other):

-        return not (self == other)

-

-

-class Token(ParserElement):

-    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""

-    def __init__( self ):

-        super(Token,self).__init__( savelist=False )

-

-    def setName(self, name):

-        s = super(Token,self).setName(name)

-        self.errmsg = "Expected " + self.name

-        return s

-

-

-class Empty(Token):

-    """An empty token, will always match."""

-    def __init__( self ):

-        super(Empty,self).__init__()

-        self.name = "Empty"

-        self.mayReturnEmpty = True

-        self.mayIndexError = False

-

-

-class NoMatch(Token):

-    """A token that will never match."""

-    def __init__( self ):

-        super(NoMatch,self).__init__()

-        self.name = "NoMatch"

-        self.mayReturnEmpty = True

-        self.mayIndexError = False

-        self.errmsg = "Unmatchable token"

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        exc = self.myException

-        exc.loc = loc

-        exc.pstr = instring

-        raise exc

-

-

-class Literal(Token):

-    """Token to exactly match a specified string."""

-    def __init__( self, matchString ):

-        super(Literal,self).__init__()

-        self.match = matchString

-        self.matchLen = len(matchString)

-        try:

-            self.firstMatchChar = matchString[0]

-        except IndexError:

-            warnings.warn("null string passed to Literal; use Empty() instead",

-                            SyntaxWarning, stacklevel=2)

-            self.__class__ = Empty

-        self.name = '"%s"' % _ustr(self.match)

-        self.errmsg = "Expected " + self.name

-        self.mayReturnEmpty = False

-        self.mayIndexError = False

-

-    # Performance tuning: this routine gets called a *lot*

-    # if this is a single character match string  and the first character matches,

-    # short-circuit as quickly as possible, and avoid calling startswith

-    #~ @profile

-    def parseImpl( self, instring, loc, doActions=True ):

-        if (instring[loc] == self.firstMatchChar and

-            (self.matchLen==1 or instring.startswith(self.match,loc)) ):

-            return loc+self.matchLen, self.match

-        #~ raise ParseException( instring, loc, self.errmsg )

-        exc = self.myException

-        exc.loc = loc

-        exc.pstr = instring

-        raise exc

-_L = Literal

-

-class Keyword(Token):

-    """Token to exactly match a specified string as a keyword, that is, it must be

-       immediately followed by a non-keyword character.  Compare with C{Literal}::

-         Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.

-         Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}

-       Accepts two optional constructor arguments in addition to the keyword string:

-       C{identChars} is a string of characters that would be valid identifier characters,

-       defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive

-       matching, default is C{False}.

-    """

-    DEFAULT_KEYWORD_CHARS = alphanums+"_$"

-

-    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):

-        super(Keyword,self).__init__()

-        self.match = matchString

-        self.matchLen = len(matchString)

-        try:

-            self.firstMatchChar = matchString[0]

-        except IndexError:

-            warnings.warn("null string passed to Keyword; use Empty() instead",

-                            SyntaxWarning, stacklevel=2)

-        self.name = '"%s"' % self.match

-        self.errmsg = "Expected " + self.name

-        self.mayReturnEmpty = False

-        self.mayIndexError = False

-        self.caseless = caseless

-        if caseless:

-            self.caselessmatch = matchString.upper()

-            identChars = identChars.upper()

-        self.identChars = set(identChars)

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if self.caseless:

-            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and

-                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and

-                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):

-                return loc+self.matchLen, self.match

-        else:

-            if (instring[loc] == self.firstMatchChar and

-                (self.matchLen==1 or instring.startswith(self.match,loc)) and

-                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and

-                (loc == 0 or instring[loc-1] not in self.identChars) ):

-                return loc+self.matchLen, self.match

-        #~ raise ParseException( instring, loc, self.errmsg )

-        exc = self.myException

-        exc.loc = loc

-        exc.pstr = instring

-        raise exc

-

-    def copy(self):

-        c = super(Keyword,self).copy()

-        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS

-        return c

-

-    def setDefaultKeywordChars( chars ):

-        """Overrides the default Keyword chars

-        """

-        Keyword.DEFAULT_KEYWORD_CHARS = chars

-    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)

-
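# Editor's note: a minimal usage sketch, not part of the deleted pyparsing source. It
# contrasts Literal and Keyword as the docstring above describes, assuming the module's
# parseString helper defined elsewhere in this file.
from pyparsing import Literal, Keyword, ParseException

print(Literal("if").parseString("ifAndOnlyIf"))   # -> ['if']  (matches the leading "if")
try:
    Keyword("if").parseString("ifAndOnlyIf")      # fails: "if" is followed by an identifier char
except ParseException as err:
    print(err)
print(Keyword("if").parseString("if(y==2)"))      # -> ['if']  ("(" is not a keyword character)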

-class CaselessLiteral(Literal):

-    """Token to match a specified string, ignoring case of letters.

-       Note: the matched results will always be in the case of the given

-       match string, NOT the case of the input text.

-    """

-    def __init__( self, matchString ):

-        super(CaselessLiteral,self).__init__( matchString.upper() )

-        # Preserve the defining literal.

-        self.returnString = matchString

-        self.name = "'%s'" % self.returnString

-        self.errmsg = "Expected " + self.name

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if instring[ loc:loc+self.matchLen ].upper() == self.match:

-            return loc+self.matchLen, self.returnString

-        #~ raise ParseException( instring, loc, self.errmsg )

-        exc = self.myException

-        exc.loc = loc

-        exc.pstr = instring

-        raise exc

-

-class CaselessKeyword(Keyword):

-    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):

-        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and

-             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):

-            return loc+self.matchLen, self.match

-        #~ raise ParseException( instring, loc, self.errmsg )

-        exc = self.myException

-        exc.loc = loc

-        exc.pstr = instring

-        raise exc

-
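# Editor's note: illustrative sketch, not part of the deleted file. Per the CaselessLiteral
# docstring, results keep the case of the match string given at construction, not the case
# of the input text.
from pyparsing import CaselessLiteral, CaselessKeyword

print(CaselessLiteral("SELECT").parseString("select * from t"))  # -> ['SELECT']
print(CaselessKeyword("select").parseString("SELECT id"))        # -> ['select']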

-class Word(Token):

-    """Token for matching words composed of allowed character sets.

-       Defined with string containing all allowed initial characters,

-       an optional string containing allowed body characters (if omitted,

-       defaults to the initial character set), and an optional minimum,

-       maximum, and/or exact length.  The default value for C{min} is 1 (a

-       minimum value < 1 is not valid); the default values for C{max} and C{exact}

-       are 0, meaning no maximum or exact length restriction. An optional

-       C{exclude} parameter can list characters that might be found in 

-       the input C{bodyChars} string; useful to define a word of all printables

-       except for one or two characters, for instance.

-    """

-    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):

-        super(Word,self).__init__()

-        if excludeChars:

-            initChars = ''.join([c for c in initChars if c not in excludeChars])

-            if bodyChars:

-                bodyChars = ''.join([c for c in bodyChars if c not in excludeChars])

-        self.initCharsOrig = initChars

-        self.initChars = set(initChars)

-        if bodyChars :

-            self.bodyCharsOrig = bodyChars

-            self.bodyChars = set(bodyChars)

-        else:

-            self.bodyCharsOrig = initChars

-            self.bodyChars = set(initChars)

-

-        self.maxSpecified = max > 0

-

-        if min < 1:

-            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")

-

-        self.minLen = min

-

-        if max > 0:

-            self.maxLen = max

-        else:

-            self.maxLen = _MAX_INT

-

-        if exact > 0:

-            self.maxLen = exact

-            self.minLen = exact

-

-        self.name = _ustr(self)

-        self.errmsg = "Expected " + self.name

-        self.mayIndexError = False

-        self.asKeyword = asKeyword

-

-        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):

-            if self.bodyCharsOrig == self.initCharsOrig:

-                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)

-            elif len(self.bodyCharsOrig) == 1:

-                self.reString = "%s[%s]*" % \

-                                      (re.escape(self.initCharsOrig),

-                                      _escapeRegexRangeChars(self.bodyCharsOrig),)

-            else:

-                self.reString = "[%s][%s]*" % \

-                                      (_escapeRegexRangeChars(self.initCharsOrig),

-                                      _escapeRegexRangeChars(self.bodyCharsOrig),)

-            if self.asKeyword:

-                self.reString = r"\b"+self.reString+r"\b"

-            try:

-                self.re = re.compile( self.reString )

-            except:

-                self.re = None

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if self.re:

-            result = self.re.match(instring,loc)

-            if not result:

-                exc = self.myException

-                exc.loc = loc

-                exc.pstr = instring

-                raise exc

-

-            loc = result.end()

-            return loc, result.group()

-

-        if not(instring[ loc ] in self.initChars):

-            #~ raise ParseException( instring, loc, self.errmsg )

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-        start = loc

-        loc += 1

-        instrlen = len(instring)

-        bodychars = self.bodyChars

-        maxloc = start + self.maxLen

-        maxloc = min( maxloc, instrlen )

-        while loc < maxloc and instring[loc] in bodychars:

-            loc += 1

-

-        throwException = False

-        if loc - start < self.minLen:

-            throwException = True

-        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:

-            throwException = True

-        if self.asKeyword:

-            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):

-                throwException = True

-

-        if throwException:

-            #~ raise ParseException( instring, loc, self.errmsg )

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-

-        return loc, instring[start:loc]

-

-    def __str__( self ):

-        try:

-            return super(Word,self).__str__()

-        except:

-            pass

-

-

-        if self.strRepr is None:

-

-            def charsAsStr(s):

-                if len(s)>4:

-                    return s[:4]+"..."

-                else:

-                    return s

-

-            if ( self.initCharsOrig != self.bodyCharsOrig ):

-                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )

-            else:

-                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)

-

-        return self.strRepr

-

-
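# Editor's note: illustrative sketch, not part of the deleted file. Word is built from an
# initial-character set plus an optional body-character set, as described in the docstring above.
from pyparsing import Word, alphas, alphanums, nums

identifier = Word(alphas + "_", alphanums + "_")   # first char alpha/underscore, rest alphanumeric
integer    = Word(nums)
print(identifier.parseString("foo_bar42 = 7"))     # -> ['foo_bar42']
print(integer.parseString("12345"))                # -> ['12345']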

-class Regex(Token):

-    """Token for matching strings that match a given regular expression.

-       Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.

-    """

-    compiledREtype = type(re.compile("[A-Z]"))

-    def __init__( self, pattern, flags=0):

-        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""

-        super(Regex,self).__init__()

-

-        if isinstance(pattern, basestring):

-            if len(pattern) == 0:

-                warnings.warn("null string passed to Regex; use Empty() instead",

-                        SyntaxWarning, stacklevel=2)

-

-            self.pattern = pattern

-            self.flags = flags

-

-            try:

-                self.re = re.compile(self.pattern, self.flags)

-                self.reString = self.pattern

-            except sre_constants.error:

-                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,

-                    SyntaxWarning, stacklevel=2)

-                raise

-

-        elif isinstance(pattern, Regex.compiledREtype):

-            self.re = pattern

-            self.pattern = \

-            self.reString = str(pattern)

-            self.flags = flags

-            

-        else:

-            raise ValueError("Regex may only be constructed with a string or a compiled RE object")

-

-        self.name = _ustr(self)

-        self.errmsg = "Expected " + self.name

-        self.mayIndexError = False

-        self.mayReturnEmpty = True

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        result = self.re.match(instring,loc)

-        if not result:

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-

-        loc = result.end()

-        d = result.groupdict()

-        ret = ParseResults(result.group())

-        if d:

-            for k in d:

-                ret[k] = d[k]

-        return loc,ret

-

-    def __str__( self ):

-        try:

-            return super(Regex,self).__str__()

-        except:

-            pass

-

-        if self.strRepr is None:

-            self.strRepr = "Re:(%s)" % repr(self.pattern)

-

-        return self.strRepr

-

-
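# Editor's note: illustrative sketch, not part of the deleted file. Named groups in the
# pattern become result names, which is why parseImpl copies result.groupdict() above.
from pyparsing import Regex

date = Regex(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})")
res = date.parseString("2013-07-04")
print(res[0])        # '2013-07-04'
print(res["year"])   # '2013'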

-class QuotedString(Token):

-    """Token for matching strings that are delimited by quoting characters.

-    """

-    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):

-        """

-           Defined with the following parameters:

-            - quoteChar - string of one or more characters defining the quote delimiting string

-            - escChar - character to escape quotes, typically backslash (default=None)

-            - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)

-            - multiline - boolean indicating whether quotes can span multiple lines (default=False)

-            - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)

-            - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)

-        """

-        super(QuotedString,self).__init__()

-

-        # remove white space from quote chars - wont work anyway

-        quoteChar = quoteChar.strip()

-        if len(quoteChar) == 0:

-            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)

-            raise SyntaxError()

-

-        if endQuoteChar is None:

-            endQuoteChar = quoteChar

-        else:

-            endQuoteChar = endQuoteChar.strip()

-            if len(endQuoteChar) == 0:

-                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)

-                raise SyntaxError()

-

-        self.quoteChar = quoteChar

-        self.quoteCharLen = len(quoteChar)

-        self.firstQuoteChar = quoteChar[0]

-        self.endQuoteChar = endQuoteChar

-        self.endQuoteCharLen = len(endQuoteChar)

-        self.escChar = escChar

-        self.escQuote = escQuote

-        self.unquoteResults = unquoteResults

-

-        if multiline:

-            self.flags = re.MULTILINE | re.DOTALL

-            self.pattern = r'%s(?:[^%s%s]' % \

-                ( re.escape(self.quoteChar),

-                  _escapeRegexRangeChars(self.endQuoteChar[0]),

-                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )

-        else:

-            self.flags = 0

-            self.pattern = r'%s(?:[^%s\n\r%s]' % \

-                ( re.escape(self.quoteChar),

-                  _escapeRegexRangeChars(self.endQuoteChar[0]),

-                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )

-        if len(self.endQuoteChar) > 1:

-            self.pattern += (

-                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),

-                                               _escapeRegexRangeChars(self.endQuoteChar[i]))

-                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'

-                )

-        if escQuote:

-            self.pattern += (r'|(?:%s)' % re.escape(escQuote))

-        if escChar:

-            self.pattern += (r'|(?:%s.)' % re.escape(escChar))

-            charset = ''.join(set(self.quoteChar[0]+self.endQuoteChar[0])).replace('^',r'\^').replace('-',r'\-')

-            self.escCharReplacePattern = re.escape(self.escChar)+("([%s])" % charset)

-        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))

-

-        try:

-            self.re = re.compile(self.pattern, self.flags)

-            self.reString = self.pattern

-        except sre_constants.error:

-            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,

-                SyntaxWarning, stacklevel=2)

-            raise

-

-        self.name = _ustr(self)

-        self.errmsg = "Expected " + self.name

-        self.mayIndexError = False

-        self.mayReturnEmpty = True

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None

-        if not result:

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-

-        loc = result.end()

-        ret = result.group()

-

-        if self.unquoteResults:

-

-            # strip off quotes

-            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]

-

-            if isinstance(ret,basestring):

-                # replace escaped characters

-                if self.escChar:

-                    ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)

-

-                # replace escaped quotes

-                if self.escQuote:

-                    ret = ret.replace(self.escQuote, self.endQuoteChar)

-

-        return loc, ret

-

-    def __str__( self ):

-        try:

-            return super(QuotedString,self).__str__()

-        except:

-            pass

-

-        if self.strRepr is None:

-            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)

-

-        return self.strRepr

-

-
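# Editor's note: illustrative sketch, not part of the deleted file, showing an escape-quote
# sequence and unquoteResults as documented in the constructor docstring above.
from pyparsing import QuotedString

sql_string = QuotedString("'", escQuote="''")      # SQL-style '' escapes an embedded quote
print(sql_string.parseString("'it''s here'"))      # -> ["it's here"]
raw = QuotedString('"', escChar="\\", unquoteResults=False)
print(raw.parseString('"a \\"quoted\\" word"'))    # quotes and escape characters preserved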

-class CharsNotIn(Token):

-    """Token for matching words composed of characters *not* in a given set.

-       Defined with string containing all disallowed characters, and an optional

-       minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a

-       minimum value < 1 is not valid); the default values for C{max} and C{exact}

-       are 0, meaning no maximum or exact length restriction.

-    """

-    def __init__( self, notChars, min=1, max=0, exact=0 ):

-        super(CharsNotIn,self).__init__()

-        self.skipWhitespace = False

-        self.notChars = notChars

-

-        if min < 1:

-            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")

-

-        self.minLen = min

-

-        if max > 0:

-            self.maxLen = max

-        else:

-            self.maxLen = _MAX_INT

-

-        if exact > 0:

-            self.maxLen = exact

-            self.minLen = exact

-

-        self.name = _ustr(self)

-        self.errmsg = "Expected " + self.name

-        self.mayReturnEmpty = ( self.minLen == 0 )

-        self.mayIndexError = False

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if instring[loc] in self.notChars:

-            #~ raise ParseException( instring, loc, self.errmsg )

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-

-        start = loc

-        loc += 1

-        notchars = self.notChars

-        maxlen = min( start+self.maxLen, len(instring) )

-        while loc < maxlen and \

-              (instring[loc] not in notchars):

-            loc += 1

-

-        if loc - start < self.minLen:

-            #~ raise ParseException( instring, loc, self.errmsg )

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-

-        return loc, instring[start:loc]

-

-    def __str__( self ):

-        try:

-            return super(CharsNotIn, self).__str__()

-        except:

-            pass

-

-        if self.strRepr is None:

-            if len(self.notChars) > 4:

-                self.strRepr = "!W:(%s...)" % self.notChars[:4]

-            else:

-                self.strRepr = "!W:(%s)" % self.notChars

-

-        return self.strRepr

-
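# Editor's note: illustrative sketch, not part of the deleted file. CharsNotIn is handy for
# "everything up to the delimiter" fields, e.g. a naive CSV cell.
from pyparsing import CharsNotIn, ZeroOrMore, Suppress

csv_cell = CharsNotIn(",\n")
csv_line = csv_cell + ZeroOrMore(Suppress(",") + csv_cell)
print(csv_line.parseString("red,green,blue"))   # -> ['red', 'green', 'blue']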

-class White(Token):

-    """Special matching class for matching whitespace.  Normally, whitespace is ignored

-       by pyparsing grammars.  This class is included when some whitespace structures

-       are significant.  Define with a string containing the whitespace characters to be

-       matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,

-       as defined for the C{Word} class."""

-    whiteStrs = {

-        " " : "<SPC>",

-        "\t": "<TAB>",

-        "\n": "<LF>",

-        "\r": "<CR>",

-        "\f": "<FF>",

-        }

-    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):

-        super(White,self).__init__()

-        self.matchWhite = ws

-        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )

-        #~ self.leaveWhitespace()

-        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))

-        self.mayReturnEmpty = True

-        self.errmsg = "Expected " + self.name

-

-        self.minLen = min

-

-        if max > 0:

-            self.maxLen = max

-        else:

-            self.maxLen = _MAX_INT

-

-        if exact > 0:

-            self.maxLen = exact

-            self.minLen = exact

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if not(instring[ loc ] in self.matchWhite):

-            #~ raise ParseException( instring, loc, self.errmsg )

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-        start = loc

-        loc += 1

-        maxloc = start + self.maxLen

-        maxloc = min( maxloc, len(instring) )

-        while loc < maxloc and instring[loc] in self.matchWhite:

-            loc += 1

-

-        if loc - start < self.minLen:

-            #~ raise ParseException( instring, loc, self.errmsg )

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-

-        return loc, instring[start:loc]

-

-
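# Editor's note: illustrative sketch, not part of the deleted file. White makes whitespace
# itself a token, e.g. to capture significant indentation that grammars normally skip.
from pyparsing import White, Word, alphas

indented = White(" \t") + Word(alphas)
print(indented.parseString("    hello"))   # -> ['    ', 'hello']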

-class _PositionToken(Token):

-    def __init__( self ):

-        super(_PositionToken,self).__init__()

-        self.name=self.__class__.__name__

-        self.mayReturnEmpty = True

-        self.mayIndexError = False

-

-class GoToColumn(_PositionToken):

-    """Token to advance to a specific column of input text; useful for tabular report scraping."""

-    def __init__( self, colno ):

-        super(GoToColumn,self).__init__()

-        self.col = colno

-

-    def preParse( self, instring, loc ):

-        if col(loc,instring) != self.col:

-            instrlen = len(instring)

-            if self.ignoreExprs:

-                loc = self._skipIgnorables( instring, loc )

-            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :

-                loc += 1

-        return loc

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        thiscol = col( loc, instring )

-        if thiscol > self.col:

-            raise ParseException( instring, loc, "Text not in expected column", self )

-        newloc = loc + self.col - thiscol

-        ret = instring[ loc: newloc ]

-        return newloc, ret

-

-class LineStart(_PositionToken):

-    """Matches if current position is at the beginning of a line within the parse string"""

-    def __init__( self ):

-        super(LineStart,self).__init__()

-        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )

-        self.errmsg = "Expected start of line"

-

-    def preParse( self, instring, loc ):

-        preloc = super(LineStart,self).preParse(instring,loc)

-        if instring[preloc] == "\n":

-            loc += 1

-        return loc

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if not( loc==0 or

-            (loc == self.preParse( instring, 0 )) or

-            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:

-            #~ raise ParseException( instring, loc, "Expected start of line" )

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-        return loc, []

-

-class LineEnd(_PositionToken):

-    """Matches if current position is at the end of a line within the parse string"""

-    def __init__( self ):

-        super(LineEnd,self).__init__()

-        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )

-        self.errmsg = "Expected end of line"

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if loc<len(instring):

-            if instring[loc] == "\n":

-                return loc+1, "\n"

-            else:

-                #~ raise ParseException( instring, loc, "Expected end of line" )

-                exc = self.myException

-                exc.loc = loc

-                exc.pstr = instring

-                raise exc

-        elif loc == len(instring):

-            return loc+1, []

-        else:

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-
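# Editor's note: illustrative sketch, not part of the deleted file. LineEnd matches a newline
# (or the end of the input), so it can terminate a per-line grammar.
from pyparsing import LineEnd, Word, alphas

line = Word(alphas) + LineEnd().suppress()
print(line.parseString("hello\nworld"))   # -> ['hello']
print(line.parseString("hello"))          # -> ['hello']  (end of input also satisfies LineEnd)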

-class StringStart(_PositionToken):

-    """Matches if current position is at the beginning of the parse string"""

-    def __init__( self ):

-        super(StringStart,self).__init__()

-        self.errmsg = "Expected start of text"

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if loc != 0:

-            # see if entire string up to here is just whitespace and ignoreables

-            if loc != self.preParse( instring, 0 ):

-                #~ raise ParseException( instring, loc, "Expected start of text" )

-                exc = self.myException

-                exc.loc = loc

-                exc.pstr = instring

-                raise exc

-        return loc, []

-

-class StringEnd(_PositionToken):

-    """Matches if current position is at the end of the parse string"""

-    def __init__( self ):

-        super(StringEnd,self).__init__()

-        self.errmsg = "Expected end of text"

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if loc < len(instring):

-            #~ raise ParseException( instring, loc, "Expected end of text" )

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-        elif loc == len(instring):

-            return loc+1, []

-        elif loc > len(instring):

-            return loc, []

-        else:

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-

-class WordStart(_PositionToken):

-    """Matches if the current position is at the beginning of a Word, and

-       is not preceded by any character in a given set of C{wordChars}

-       (default=C{printables}). To emulate the C{\b} behavior of regular expressions,

-       use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of

-       the string being parsed, or at the beginning of a line.

-    """

-    def __init__(self, wordChars = printables):

-        super(WordStart,self).__init__()

-        self.wordChars = set(wordChars)

-        self.errmsg = "Not at the start of a word"

-

-    def parseImpl(self, instring, loc, doActions=True ):

-        if loc != 0:

-            if (instring[loc-1] in self.wordChars or

-                instring[loc] not in self.wordChars):

-                exc = self.myException

-                exc.loc = loc

-                exc.pstr = instring

-                raise exc

-        return loc, []

-

-class WordEnd(_PositionToken):

-    """Matches if the current position is at the end of a Word, and

-       is not followed by any character in a given set of C{wordChars}

-       (default=C{printables}). To emulate the C{\b} behavior of regular expressions,

-       use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of

-       the string being parsed, or at the end of a line.

-    """

-    def __init__(self, wordChars = printables):

-        super(WordEnd,self).__init__()

-        self.wordChars = set(wordChars)

-        self.skipWhitespace = False

-        self.errmsg = "Not at the end of a word"

-

-    def parseImpl(self, instring, loc, doActions=True ):

-        instrlen = len(instring)

-        if instrlen>0 and loc<instrlen:

-            if (instring[loc] in self.wordChars or

-                instring[loc-1] not in self.wordChars):

-                #~ raise ParseException( instring, loc, "Expected end of word" )

-                exc = self.myException

-                exc.loc = loc

-                exc.pstr = instring

-                raise exc

-        return loc, []

-

-
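# Editor's note: illustrative sketch, not part of the deleted file. WordStart/WordEnd emulate
# regex \b word boundaries, per the docstrings above; scanString is assumed from elsewhere
# in this module.
from pyparsing import WordStart, WordEnd, Literal, alphanums

ship = WordStart(alphanums) + Literal("ship") + WordEnd(alphanums)
for tokens, start, end in ship.scanString("ship shipment flagship ship"):
    print(tokens)   # matches only the two standalone "ship" occurrences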

-class ParseExpression(ParserElement):

-    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""

-    def __init__( self, exprs, savelist = False ):

-        super(ParseExpression,self).__init__(savelist)

-        if isinstance( exprs, list ):

-            self.exprs = exprs

-        elif isinstance( exprs, basestring ):

-            self.exprs = [ Literal( exprs ) ]

-        else:

-            try:

-                self.exprs = list( exprs )

-            except TypeError:

-                self.exprs = [ exprs ]

-        self.callPreparse = False

-

-    def __getitem__( self, i ):

-        return self.exprs[i]

-

-    def append( self, other ):

-        self.exprs.append( other )

-        self.strRepr = None

-        return self

-

-    def leaveWhitespace( self ):

-        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on

-           all contained expressions."""

-        self.skipWhitespace = False

-        self.exprs = [ e.copy() for e in self.exprs ]

-        for e in self.exprs:

-            e.leaveWhitespace()

-        return self

-

-    def ignore( self, other ):

-        if isinstance( other, Suppress ):

-            if other not in self.ignoreExprs:

-                super( ParseExpression, self).ignore( other )

-                for e in self.exprs:

-                    e.ignore( self.ignoreExprs[-1] )

-        else:

-            super( ParseExpression, self).ignore( other )

-            for e in self.exprs:

-                e.ignore( self.ignoreExprs[-1] )

-        return self

-

-    def __str__( self ):

-        try:

-            return super(ParseExpression,self).__str__()

-        except:

-            pass

-

-        if self.strRepr is None:

-            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )

-        return self.strRepr

-

-    def streamline( self ):

-        super(ParseExpression,self).streamline()

-

-        for e in self.exprs:

-            e.streamline()

-

-        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )

-        # but only if there are no parse actions or resultsNames on the nested And's

-        # (likewise for Or's and MatchFirst's)

-        if ( len(self.exprs) == 2 ):

-            other = self.exprs[0]

-            if ( isinstance( other, self.__class__ ) and

-                  not(other.parseAction) and

-                  other.resultsName is None and

-                  not other.debug ):

-                self.exprs = other.exprs[:] + [ self.exprs[1] ]

-                self.strRepr = None

-                self.mayReturnEmpty |= other.mayReturnEmpty

-                self.mayIndexError  |= other.mayIndexError

-

-            other = self.exprs[-1]

-            if ( isinstance( other, self.__class__ ) and

-                  not(other.parseAction) and

-                  other.resultsName is None and

-                  not other.debug ):

-                self.exprs = self.exprs[:-1] + other.exprs[:]

-                self.strRepr = None

-                self.mayReturnEmpty |= other.mayReturnEmpty

-                self.mayIndexError  |= other.mayIndexError

-

-        return self

-

-    def setResultsName( self, name, listAllMatches=False ):

-        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)

-        return ret

-

-    def validate( self, validateTrace=[] ):

-        tmp = validateTrace[:]+[self]

-        for e in self.exprs:

-            e.validate(tmp)

-        self.checkRecursion( [] )

-        

-    def copy(self):

-        ret = super(ParseExpression,self).copy()

-        ret.exprs = [e.copy() for e in self.exprs]

-        return ret

-

-class And(ParseExpression):

-    """Requires all given C{ParseExpression}s to be found in the given order.

-       Expressions may be separated by whitespace.

-       May be constructed using the C{'+'} operator.

-    """

-

-    class _ErrorStop(Empty):

-        def __init__(self, *args, **kwargs):

-            super(Empty,self).__init__(*args, **kwargs)

-            self.leaveWhitespace()

-

-    def __init__( self, exprs, savelist = True ):

-        super(And,self).__init__(exprs, savelist)

-        self.mayReturnEmpty = True

-        for e in self.exprs:

-            if not e.mayReturnEmpty:

-                self.mayReturnEmpty = False

-                break

-        self.setWhitespaceChars( exprs[0].whiteChars )

-        self.skipWhitespace = exprs[0].skipWhitespace

-        self.callPreparse = True

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        # pass False as last arg to _parse for first element, since we already

-        # pre-parsed the string as part of our And pre-parsing

-        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )

-        errorStop = False

-        for e in self.exprs[1:]:

-            if isinstance(e, And._ErrorStop):

-                errorStop = True

-                continue

-            if errorStop:

-                try:

-                    loc, exprtokens = e._parse( instring, loc, doActions )

-                except ParseSyntaxException:

-                    raise

-                except ParseBaseException:

-                    pe = sys.exc_info()[1]

-                    raise ParseSyntaxException(pe)

-                except IndexError:

-                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )

-            else:

-                loc, exprtokens = e._parse( instring, loc, doActions )

-            if exprtokens or exprtokens.keys():

-                resultlist += exprtokens

-        return loc, resultlist

-

-    def __iadd__(self, other ):

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        return self.append( other ) #And( [ self, other ] )

-

-    def checkRecursion( self, parseElementList ):

-        subRecCheckList = parseElementList[:] + [ self ]

-        for e in self.exprs:

-            e.checkRecursion( subRecCheckList )

-            if not e.mayReturnEmpty:

-                break

-

-    def __str__( self ):

-        if hasattr(self,"name"):

-            return self.name

-

-        if self.strRepr is None:

-            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"

-

-        return self.strRepr

-

-
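# Editor's note: illustrative sketch, not part of the deleted file. '+' builds an And, which
# requires each expression in order; whitespace between them is skipped by default.
from pyparsing import Word, alphas, nums

assignment = Word(alphas) + "=" + Word(nums)     # the plain string "=" is promoted to a Literal
print(assignment.parseString("answer = 42"))     # -> ['answer', '=', '42']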

-class Or(ParseExpression):

-    """Requires that at least one C{ParseExpression} is found.

-       If two expressions match, the expression that matches the longest string will be used.

-       May be constructed using the C{'^'} operator.

-    """

-    def __init__( self, exprs, savelist = False ):

-        super(Or,self).__init__(exprs, savelist)

-        self.mayReturnEmpty = False

-        for e in self.exprs:

-            if e.mayReturnEmpty:

-                self.mayReturnEmpty = True

-                break

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        maxExcLoc = -1

-        maxMatchLoc = -1

-        maxException = None

-        for e in self.exprs:

-            try:

-                loc2 = e.tryParse( instring, loc )

-            except ParseException:

-                err = sys.exc_info()[1]

-                if err.loc > maxExcLoc:

-                    maxException = err

-                    maxExcLoc = err.loc

-            except IndexError:

-                if len(instring) > maxExcLoc:

-                    maxException = ParseException(instring,len(instring),e.errmsg,self)

-                    maxExcLoc = len(instring)

-            else:

-                if loc2 > maxMatchLoc:

-                    maxMatchLoc = loc2

-                    maxMatchExp = e

-

-        if maxMatchLoc < 0:

-            if maxException is not None:

-                raise maxException

-            else:

-                raise ParseException(instring, loc, "no defined alternatives to match", self)

-

-        return maxMatchExp._parse( instring, loc, doActions )

-

-    def __ixor__(self, other ):

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        return self.append( other ) #Or( [ self, other ] )

-

-    def __str__( self ):

-        if hasattr(self,"name"):

-            return self.name

-

-        if self.strRepr is None:

-            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"

-

-        return self.strRepr

-

-    def checkRecursion( self, parseElementList ):

-        subRecCheckList = parseElementList[:] + [ self ]

-        for e in self.exprs:

-            e.checkRecursion( subRecCheckList )

-

-
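# Editor's note: illustrative sketch, not part of the deleted file. '^' builds an Or, which
# tries every alternative and keeps the longest match, unlike '|' (MatchFirst).
from pyparsing import Word, nums, Combine

number = Word(nums) ^ Combine(Word(nums) + "." + Word(nums))
print(number.parseString("3.1415"))   # -> ['3.1415'], the longer alternative wins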

-class MatchFirst(ParseExpression):

-    """Requires that at least one C{ParseExpression} is found.

-       If two expressions match, the first one listed is the one that will match.

-       May be constructed using the C{'|'} operator.

-    """

-    def __init__( self, exprs, savelist = False ):

-        super(MatchFirst,self).__init__(exprs, savelist)

-        if exprs:

-            self.mayReturnEmpty = False

-            for e in self.exprs:

-                if e.mayReturnEmpty:

-                    self.mayReturnEmpty = True

-                    break

-        else:

-            self.mayReturnEmpty = True

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        maxExcLoc = -1

-        maxException = None

-        for e in self.exprs:

-            try:

-                ret = e._parse( instring, loc, doActions )

-                return ret

-            except ParseException, err:

-                if err.loc > maxExcLoc:

-                    maxException = err

-                    maxExcLoc = err.loc

-            except IndexError:

-                if len(instring) > maxExcLoc:

-                    maxException = ParseException(instring,len(instring),e.errmsg,self)

-                    maxExcLoc = len(instring)

-

-        # only got here if no expression matched, raise exception for match that made it the furthest

-        else:

-            if maxException is not None:

-                raise maxException

-            else:

-                raise ParseException(instring, loc, "no defined alternatives to match", self)

-

-    def __ior__(self, other ):

-        if isinstance( other, basestring ):

-            other = Literal( other )

-        return self.append( other ) #MatchFirst( [ self, other ] )

-

-    def __str__( self ):

-        if hasattr(self,"name"):

-            return self.name

-

-        if self.strRepr is None:

-            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"

-

-        return self.strRepr

-

-    def checkRecursion( self, parseElementList ):

-        subRecCheckList = parseElementList[:] + [ self ]

-        for e in self.exprs:

-            e.checkRecursion( subRecCheckList )

-

-
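# Editor's note: illustrative sketch, not part of the deleted file. '|' builds a MatchFirst,
# which takes the first alternative that matches, so ordering matters.
from pyparsing import Literal

op = Literal("<=") | Literal("<")                       # test "<=" before "<" so it is not shadowed
print(op.parseString("<= 10"))                          # -> ['<=']
print((Literal("<") | Literal("<=")).parseString("<= 10"))  # -> ['<'] when the shorter one is listed first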

-class Each(ParseExpression):

-    """Requires all given C{ParseExpression}s to be found, but in any order.

-       Expressions may be separated by whitespace.

-       May be constructed using the C{'&'} operator.

-    """

-    def __init__( self, exprs, savelist = True ):

-        super(Each,self).__init__(exprs, savelist)

-        self.mayReturnEmpty = True

-        for e in self.exprs:

-            if not e.mayReturnEmpty:

-                self.mayReturnEmpty = False

-                break

-        self.skipWhitespace = True

-        self.initExprGroups = True

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if self.initExprGroups:

-            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]

-            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]

-            self.optionals = opt1 + opt2

-            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]

-            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]

-            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]

-            self.required += self.multirequired

-            self.initExprGroups = False

-        tmpLoc = loc

-        tmpReqd = self.required[:]

-        tmpOpt  = self.optionals[:]

-        matchOrder = []

-

-        keepMatching = True

-        while keepMatching:

-            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired

-            failed = []

-            for e in tmpExprs:

-                try:

-                    tmpLoc = e.tryParse( instring, tmpLoc )

-                except ParseException:

-                    failed.append(e)

-                else:

-                    matchOrder.append(e)

-                    if e in tmpReqd:

-                        tmpReqd.remove(e)

-                    elif e in tmpOpt:

-                        tmpOpt.remove(e)

-            if len(failed) == len(tmpExprs):

-                keepMatching = False

-

-        if tmpReqd:

-            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )

-            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )

-

-        # add any unmatched Optionals, in case they have default values defined

-        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]

-

-        resultlist = []

-        for e in matchOrder:

-            loc,results = e._parse(instring,loc,doActions)

-            resultlist.append(results)

-

-        finalResults = ParseResults([])

-        for r in resultlist:

-            dups = {}

-            for k in r.keys():

-                if k in finalResults.keys():

-                    tmp = ParseResults(finalResults[k])

-                    tmp += ParseResults(r[k])

-                    dups[k] = tmp

-            finalResults += ParseResults(r)

-            for k,v in dups.items():

-                finalResults[k] = v

-        return loc, finalResults

-

-    def __str__( self ):

-        if hasattr(self,"name"):

-            return self.name

-

-        if self.strRepr is None:

-            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"

-

-        return self.strRepr

-

-    def checkRecursion( self, parseElementList ):

-        subRecCheckList = parseElementList[:] + [ self ]

-        for e in self.exprs:

-            e.checkRecursion( subRecCheckList )

-

-
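# Editor's note: illustrative sketch, not part of the deleted file. '&' builds an Each, which
# requires all pieces but accepts them in any order.
from pyparsing import Keyword, Word, nums

color = Keyword("color") + Word(nums)
size  = Keyword("size") + Word(nums)
spec  = color & size
print(spec.parseString("size 10 color 3"))   # -> ['size', '10', 'color', '3']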

-class ParseElementEnhance(ParserElement):

-    """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""

-    def __init__( self, expr, savelist=False ):

-        super(ParseElementEnhance,self).__init__(savelist)

-        if isinstance( expr, basestring ):

-            expr = Literal(expr)

-        self.expr = expr

-        self.strRepr = None

-        if expr is not None:

-            self.mayIndexError = expr.mayIndexError

-            self.mayReturnEmpty = expr.mayReturnEmpty

-            self.setWhitespaceChars( expr.whiteChars )

-            self.skipWhitespace = expr.skipWhitespace

-            self.saveAsList = expr.saveAsList

-            self.callPreparse = expr.callPreparse

-            self.ignoreExprs.extend(expr.ignoreExprs)

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        if self.expr is not None:

-            return self.expr._parse( instring, loc, doActions, callPreParse=False )

-        else:

-            raise ParseException("",loc,self.errmsg,self)

-

-    def leaveWhitespace( self ):

-        self.skipWhitespace = False

-        self.expr = self.expr.copy()

-        if self.expr is not None:

-            self.expr.leaveWhitespace()

-        return self

-

-    def ignore( self, other ):

-        if isinstance( other, Suppress ):

-            if other not in self.ignoreExprs:

-                super( ParseElementEnhance, self).ignore( other )

-                if self.expr is not None:

-                    self.expr.ignore( self.ignoreExprs[-1] )

-        else:

-            super( ParseElementEnhance, self).ignore( other )

-            if self.expr is not None:

-                self.expr.ignore( self.ignoreExprs[-1] )

-        return self

-

-    def streamline( self ):

-        super(ParseElementEnhance,self).streamline()

-        if self.expr is not None:

-            self.expr.streamline()

-        return self

-

-    def checkRecursion( self, parseElementList ):

-        if self in parseElementList:

-            raise RecursiveGrammarException( parseElementList+[self] )

-        subRecCheckList = parseElementList[:] + [ self ]

-        if self.expr is not None:

-            self.expr.checkRecursion( subRecCheckList )

-

-    def validate( self, validateTrace=[] ):

-        tmp = validateTrace[:]+[self]

-        if self.expr is not None:

-            self.expr.validate(tmp)

-        self.checkRecursion( [] )

-

-    def __str__( self ):

-        try:

-            return super(ParseElementEnhance,self).__str__()

-        except:

-            pass

-

-        if self.strRepr is None and self.expr is not None:

-            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )

-        return self.strRepr

-

-

-class FollowedBy(ParseElementEnhance):

-    """Lookahead matching of the given parse expression.  C{FollowedBy}

-    does *not* advance the parsing position within the input string, it only

-    verifies that the specified parse expression matches at the current

-    position.  C{FollowedBy} always returns a null token list."""

-    def __init__( self, expr ):

-        super(FollowedBy,self).__init__(expr)

-        self.mayReturnEmpty = True

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        self.expr.tryParse( instring, loc )

-        return loc, []

-

-

-class NotAny(ParseElementEnhance):

-    """Lookahead to disallow matching with the given parse expression.  C{NotAny}

-    does *not* advance the parsing position within the input string, it only

-    verifies that the specified parse expression does *not* match at the current

-    position.  Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}

-    always returns a null token list.  May be constructed using the '~' operator."""

-    def __init__( self, expr ):

-        super(NotAny,self).__init__(expr)

-        #~ self.leaveWhitespace()

-        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs

-        self.mayReturnEmpty = True

-        self.errmsg = "Found unwanted token, "+_ustr(self.expr)

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        try:

-            self.expr.tryParse( instring, loc )

-        except (ParseException,IndexError):

-            pass

-        else:

-            #~ raise ParseException(instring, loc, self.errmsg )

-            exc = self.myException

-            exc.loc = loc

-            exc.pstr = instring

-            raise exc

-        return loc, []

-

-    def __str__( self ):

-        if hasattr(self,"name"):

-            return self.name

-

-        if self.strRepr is None:

-            self.strRepr = "~{" + _ustr(self.expr) + "}"

-

-        return self.strRepr

-

-
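# Editor's note: illustrative sketch, not part of the deleted file. Both lookaheads match
# without consuming input: FollowedBy requires the expression ahead, NotAny ('~') forbids it.
from pyparsing import Word, alphas, FollowedBy, Keyword

label = Word(alphas) + FollowedBy(":")
print(label.parseString("name: loxi"))        # -> ['name']

identifier = ~Keyword("end") + Word(alphas)   # any word except the bare keyword "end"
print(identifier.parseString("endpoint"))     # -> ['endpoint']; a bare "end" would fail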

-class ZeroOrMore(ParseElementEnhance):

-    """Optional repetition of zero or more of the given expression."""

-    def __init__( self, expr ):

-        super(ZeroOrMore,self).__init__(expr)

-        self.mayReturnEmpty = True

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        tokens = []

-        try:

-            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )

-            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )

-            while 1:

-                if hasIgnoreExprs:

-                    preloc = self._skipIgnorables( instring, loc )

-                else:

-                    preloc = loc

-                loc, tmptokens = self.expr._parse( instring, preloc, doActions )

-                if tmptokens or tmptokens.keys():

-                    tokens += tmptokens

-        except (ParseException,IndexError):

-            pass

-

-        return loc, tokens

-

-    def __str__( self ):

-        if hasattr(self,"name"):

-            return self.name

-

-        if self.strRepr is None:

-            self.strRepr = "[" + _ustr(self.expr) + "]..."

-

-        return self.strRepr

-

-    def setResultsName( self, name, listAllMatches=False ):

-        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)

-        ret.saveAsList = True

-        return ret

-

-

-class OneOrMore(ParseElementEnhance):

-    """Repetition of one or more of the given expression."""

-    def parseImpl( self, instring, loc, doActions=True ):

-        # must be at least one

-        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )

-        try:

-            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )

-            while 1:

-                if hasIgnoreExprs:

-                    preloc = self._skipIgnorables( instring, loc )

-                else:

-                    preloc = loc

-                loc, tmptokens = self.expr._parse( instring, preloc, doActions )

-                if tmptokens or tmptokens.keys():

-                    tokens += tmptokens

-        except (ParseException,IndexError):

-            pass

-

-        return loc, tokens

-

-    def __str__( self ):

-        if hasattr(self,"name"):

-            return self.name

-

-        if self.strRepr is None:

-            self.strRepr = "{" + _ustr(self.expr) + "}..."

-

-        return self.strRepr

-

-    def setResultsName( self, name, listAllMatches=False ):

-        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)

-        ret.saveAsList = True

-        return ret

-
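# Editor's note: illustrative sketch, not part of the deleted file. OneOrMore requires at
# least one repetition; ZeroOrMore allows none at all.
from pyparsing import Word, alphas, nums, OneOrMore, ZeroOrMore

numbers = OneOrMore(Word(nums))
print(numbers.parseString("1 22 333"))          # -> ['1', '22', '333']

maybe_flags = ZeroOrMore(Word(alphas)) + Word(nums)
print(maybe_flags.parseString("42"))            # -> ['42']  (zero flags is acceptable)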

-class _NullToken(object):

-    def __bool__(self):

-        return False

-    __nonzero__ = __bool__

-    def __str__(self):

-        return ""

-

-_optionalNotMatched = _NullToken()

-class Optional(ParseElementEnhance):

-    """Optional matching of the given expression.

-       A default return string can also be specified, if the optional expression

-       is not found.

-    """

-    def __init__( self, exprs, default=_optionalNotMatched ):

-        super(Optional,self).__init__( exprs, savelist=False )

-        self.defaultValue = default

-        self.mayReturnEmpty = True

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        try:

-            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )

-        except (ParseException,IndexError):

-            if self.defaultValue is not _optionalNotMatched:

-                if self.expr.resultsName:

-                    tokens = ParseResults([ self.defaultValue ])

-                    tokens[self.expr.resultsName] = self.defaultValue

-                else:

-                    tokens = [ self.defaultValue ]

-            else:

-                tokens = []

-        return loc, tokens

-

-    def __str__( self ):

-        if hasattr(self,"name"):

-            return self.name

-

-        if self.strRepr is None:

-            self.strRepr = "[" + _ustr(self.expr) + "]"

-

-        return self.strRepr

-

-
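# Editor's note: illustrative sketch, not part of the deleted file. Optional supplies a
# default token when the wrapped expression is absent, per the docstring above.
from pyparsing import Word, nums, Optional

port = Word(nums) + Optional(":" + Word(nums), default=":80")
print(port.parseString("10:6653"))   # -> ['10', ':', '6653']
print(port.parseString("10"))        # -> ['10', ':80']  (the default stands in)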

-class SkipTo(ParseElementEnhance):

-    """Token for skipping over all undefined text until the matched expression is found.

-       If C{include} is set to true, the matched expression is also parsed (the skipped text

-       and matched expression are returned as a 2-element list).  The C{ignore}

-       argument is used to define grammars (typically quoted strings and comments) that

-       might contain false matches.

-    """

-    def __init__( self, other, include=False, ignore=None, failOn=None ):

-        super( SkipTo, self ).__init__( other )

-        self.ignoreExpr = ignore

-        self.mayReturnEmpty = True

-        self.mayIndexError = False

-        self.includeMatch = include

-        self.asList = False

-        if failOn is not None and isinstance(failOn, basestring):

-            self.failOn = Literal(failOn)

-        else:

-            self.failOn = failOn

-        self.errmsg = "No match found for "+_ustr(self.expr)

-

-    def parseImpl( self, instring, loc, doActions=True ):

-        startLoc = loc

-        instrlen = len(instring)

-        expr = self.expr

-        failParse = False

-        while loc <= instrlen:

-            try:

-                if self.failOn:

-                    try:

-                        self.failOn.tryParse(instring, loc)

-                    except ParseBaseException:

-                        pass

-                    else:

-                        failParse = True

-                        raise ParseException(instring, loc, "Found expression " + str(self.failOn))

-                    failParse = False

-                if self.ignoreExpr is not None:

-                    while 1:

-                        try:

-                            loc = self.ignoreExpr.tryParse(instring,loc)

-                            # print "found ignoreExpr, advance to", loc

-                        except ParseBaseException:

-                            break

-                expr._parse( instring, loc, doActions=False, callPreParse=False )

-                skipText = instring[startLoc:loc]

-                if self.includeMatch:

-                    loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)

-                    if mat:

-                        skipRes = ParseResults( skipText )

-                        skipRes += mat

-                        return loc, [ skipRes ]

-                    else:

-                        return loc, [ skipText ]

-                else:

-                    return loc, [ skipText ]

-            except (ParseException,IndexError):

-                if failParse:

-                    raise

-                else:

-                    loc += 1

-        exc = self.myException

-        exc.loc = loc

-        exc.pstr = instring

-        raise exc

-
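# Editor's note: illustrative sketch, not part of the deleted file. SkipTo jumps over
# arbitrary text up to the target expression; restOfLine is assumed from elsewhere in this module.
from pyparsing import SkipTo, Literal, restOfLine

report = SkipTo("ERROR") + Literal("ERROR") + restOfLine
print(report.parseString("2014-01-02 12:00 ERROR disk full"))
# -> ['2014-01-02 12:00 ', 'ERROR', ' disk full']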

-class Forward(ParseElementEnhance):

-    """Forward declaration of an expression to be defined later -

-       used for recursive grammars, such as algebraic infix notation.

-       When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.

-

-       Note: take care when assigning to C{Forward} not to overlook precedence of operators.

-       Specifically, '|' has a lower precedence than '<<', so that::

-          fwdExpr << a | b | c

-       will actually be evaluated as::

-          (fwdExpr << a) | b | c

-       thereby leaving b and c out as parseable alternatives.  It is recommended that you

-       explicitly group the values inserted into the C{Forward}::

-          fwdExpr << (a | b | c)

-    """

-    def __init__( self, other=None ):

-        super(Forward,self).__init__( other, savelist=False )

-

-    def __lshift__( self, other ):

-        if isinstance( other, basestring ):

-            other = Literal(other)

-        self.expr = other

-        self.mayReturnEmpty = other.mayReturnEmpty

-        self.strRepr = None

-        self.mayIndexError = self.expr.mayIndexError

-        self.mayReturnEmpty = self.expr.mayReturnEmpty

-        self.setWhitespaceChars( self.expr.whiteChars )

-        self.skipWhitespace = self.expr.skipWhitespace

-        self.saveAsList = self.expr.saveAsList

-        self.ignoreExprs.extend(self.expr.ignoreExprs)

-        return None

-

-    def leaveWhitespace( self ):

-        self.skipWhitespace = False

-        return self

-

-    def streamline( self ):

-        if not self.streamlined:

-            self.streamlined = True

-            if self.expr is not None:

-                self.expr.streamline()

-        return self

-

-    def validate( self, validateTrace=[] ):

-        if self not in validateTrace:

-            tmp = validateTrace[:]+[self]

-            if self.expr is not None:

-                self.expr.validate(tmp)

-        self.checkRecursion([])

-

-    def __str__( self ):

-        if hasattr(self,"name"):

-            return self.name

-

-        self._revertClass = self.__class__

-        self.__class__ = _ForwardNoRecurse

-        try:

-            if self.expr is not None:

-                retString = _ustr(self.expr)

-            else:

-                retString = "None"

-        finally:

-            self.__class__ = self._revertClass

-        return self.__class__.__name__ + ": " + retString

-

-    def copy(self):

-        if self.expr is not None:

-            return super(Forward,self).copy()

-        else:

-            ret = Forward()

-            ret << self

-            return ret

-
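# Editor's note: illustrative sketch, not part of the deleted file, of a recursive grammar
# via Forward; note the parentheses around the alternatives, per the precedence warning above.
from pyparsing import Forward, Word, nums, Suppress, ZeroOrMore

expr = Forward()
atom = Word(nums) | Suppress("(") + expr + Suppress(")")
expr << (atom + ZeroOrMore("+" + atom))        # '<<' binds looser than '|', hence the parentheses
print(expr.parseString("(1+2)+3"))             # -> ['1', '+', '2', '+', '3']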

-class _ForwardNoRecurse(Forward):

-    def __str__( self ):

-        return "..."

-

-class TokenConverter(ParseElementEnhance):

-    """Abstract subclass of C{ParseExpression}, for converting parsed results."""

-    def __init__( self, expr, savelist=False ):

-        super(TokenConverter,self).__init__( expr )#, savelist )

-        self.saveAsList = False

-

-class Upcase(TokenConverter):

-    """Converter to upper case all matching tokens."""

-    def __init__(self, *args):

-        super(Upcase,self).__init__(*args)

-        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",

-                       DeprecationWarning,stacklevel=2)

-

-    def postParse( self, instring, loc, tokenlist ):

-        return list(map( string.upper, tokenlist ))

-

-

-class Combine(TokenConverter):

-    """Converter to concatenate all matching tokens to a single string.

-       By default, the matching patterns must also be contiguous in the input string;

-       this can be disabled by specifying C{'adjacent=False'} in the constructor.

-    """

-    def __init__( self, expr, joinString="", adjacent=True ):

-        super(Combine,self).__init__( expr )

-        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself

-        if adjacent:

-            self.leaveWhitespace()

-        self.adjacent = adjacent

-        self.skipWhitespace = True

-        self.joinString = joinString

-        self.callPreparse = True

-

-    def ignore( self, other ):

-        if self.adjacent:

-            ParserElement.ignore(self, other)

-        else:

-            super( Combine, self).ignore( other )

-        return self

-

-    def postParse( self, instring, loc, tokenlist ):

-        retToks = tokenlist.copy()

-        del retToks[:]

-        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)

-

-        if self.resultsName and len(retToks.keys())>0:

-            return [ retToks ]

-        else:

-            return retToks

-

-class Group(TokenConverter):

-    """Converter to return the matched tokens as a list - useful for returning tokens of C{ZeroOrMore} and C{OneOrMore} expressions."""

-    def __init__( self, expr ):

-        super(Group,self).__init__( expr )

-        self.saveAsList = True

-

-    def postParse( self, instring, loc, tokenlist ):

-        return [ tokenlist ]

-

-class Dict(TokenConverter):

-    """Converter to return a repetitive expression as a list, but also as a dictionary.

-       Each element can also be referenced using the first token in the expression as its key.

-       Useful for tabular report scraping when the first column can be used as an item key.

-    """

-    def __init__( self, exprs ):

-        super(Dict,self).__init__( exprs )

-        self.saveAsList = True

-

-    def postParse( self, instring, loc, tokenlist ):

-        for i,tok in enumerate(tokenlist):

-            if len(tok) == 0:

-                continue

-            ikey = tok[0]

-            if isinstance(ikey,int):

-                ikey = _ustr(tok[0]).strip()

-            if len(tok)==1:

-                tokenlist[ikey] = _ParseResultsWithOffset("",i)

-            elif len(tok)==2 and not isinstance(tok[1],ParseResults):

-                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)

-            else:

-                dictvalue = tok.copy() #ParseResults(i)

-                del dictvalue[0]

-                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):

-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)

-                else:

-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)

-

-        if self.resultsName:

-            return [ tokenlist ]

-        else:

-            return tokenlist

-

-

-class Suppress(TokenConverter):

-    """Converter for ignoring the results of a parsed expression."""

-    def postParse( self, instring, loc, tokenlist ):

-        return []

-

-    def suppress( self ):

-        return self

-

-

-class OnlyOnce(object):

-    """Wrapper for parse actions, to ensure they are only called once."""

-    def __init__(self, methodCall):

-        self.callable = _trim_arity(methodCall)

-        self.called = False

-    def __call__(self,s,l,t):

-        if not self.called:

-            results = self.callable(s,l,t)

-            self.called = True

-            return results

-        raise ParseException(s,l,"")

-    def reset(self):

-        self.called = False

-

-def traceParseAction(f):

-    """Decorator for debugging parse actions."""

-    f = _trim_arity(f)

-    def z(*paArgs):

-        thisFunc = f.func_name

-        s,l,t = paArgs[-3:]

-        if len(paArgs)>3:

-            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc

-        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )

-        try:

-            ret = f(*paArgs)

-        except Exception:

-            exc = sys.exc_info()[1]

-            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )

-            raise

-        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )

-        return ret

-    try:

-        z.__name__ = f.__name__

-    except AttributeError:

-        pass

-    return z

-
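# Illustrative sketch of traceParseAction (assumes pyparsing is importable; the
# parse action below is an example, not part of this module):
from pyparsing import Word, nums, traceParseAction

@traceParseAction
def to_int(s, l, t):
    return [int(t[0])]

# entry into and exit from to_int is reported on stderr while parsing
print(Word(nums).setParseAction(to_int).parseString("123"))   # -> [123]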

-#

-# global helpers

-#

-def delimitedList( expr, delim=",", combine=False ):

-    """Helper to define a delimited list of expressions - the delimiter defaults to ','.

-       By default, the list elements and delimiters can have intervening whitespace and

-       comments, but this can be overridden by passing C{combine=True} to this helper.

-       If C{combine} is set to True, the matching tokens are returned as a single token

-       string, with the delimiters included; otherwise, the matching tokens are returned

-       as a list of tokens, with the delimiters suppressed.

-    """

-    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."

-    if combine:

-        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)

-    else:

-        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)

-
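# Illustrative sketch of delimitedList (assumes pyparsing is importable):
from pyparsing import Word, alphas, delimitedList

csv_ish = delimitedList(Word(alphas))
print(csv_ish.parseString("aa, bb, cc"))      # -> ['aa', 'bb', 'cc']
csv_joined = delimitedList(Word(alphas), combine=True)
print(csv_joined.parseString("aa,bb,cc"))     # -> ['aa,bb,cc'] (delimiters kept, one token)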

-def countedArray( expr, intExpr=None ):

-    """Helper to define a counted list of expressions.

-       This helper defines a pattern of the form::

-           integer expr expr expr...

-       where the leading integer tells how many expr expressions follow.

-       The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.

-    """

-    arrayExpr = Forward()

-    def countFieldParseAction(s,l,t):

-        n = t[0]

-        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))

-        return []

-    if intExpr is None:

-        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))

-    else:

-        intExpr = intExpr.copy()

-    intExpr.setName("arrayLen")

-    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)

-    return ( intExpr + arrayExpr )

-
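# Illustrative sketch of countedArray (assumes pyparsing is importable):
from pyparsing import Word, alphas, countedArray

# the leading "3" sets the count and is suppressed; the three items come back grouped
print(countedArray(Word(alphas)).parseString("3 ab cd ef"))   # -> [['ab', 'cd', 'ef']]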

-def _flatten(L):

-    ret = []

-    for i in L:

-        if isinstance(i,list):

-            ret.extend(_flatten(i))

-        else:

-            ret.append(i)

-    return ret

-

-def matchPreviousLiteral(expr):

-    """Helper to define an expression that is indirectly defined from

-       the tokens matched in a previous expression, that is, it looks

-       for a 'repeat' of a previous expression.  For example::

-           first = Word(nums)

-           second = matchPreviousLiteral(first)

-           matchExpr = first + ":" + second

-       will match C{"1:1"}, but not C{"1:2"}.  Because this matches a

-       previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.

-       If this is not desired, use C{matchPreviousExpr}.

-       Do *not* use with packrat parsing enabled.

-    """

-    rep = Forward()

-    def copyTokenToRepeater(s,l,t):

-        if t:

-            if len(t) == 1:

-                rep << t[0]

-            else:

-                # flatten t tokens

-                tflat = _flatten(t.asList())

-                rep << And( [ Literal(tt) for tt in tflat ] )

-        else:

-            rep << Empty()

-    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)

-    return rep

-
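# Illustrative sketch of matchPreviousLiteral (assumes pyparsing is importable):
from pyparsing import Word, nums, matchPreviousLiteral

first = Word(nums)
pair = first + ":" + matchPreviousLiteral(first)
print(pair.parseString("12:12"))   # -> ['12', ':', '12']
# "12:13" raises a ParseException - the second field must repeat the first literally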

-def matchPreviousExpr(expr):

-    """Helper to define an expression that is indirectly defined from

-       the tokens matched in a previous expression, that is, it looks

-       for a 'repeat' of a previous expression.  For example::

-           first = Word(nums)

-           second = matchPreviousExpr(first)

-           matchExpr = first + ":" + second

-       will match C{"1:1"}, but not C{"1:2"}.  Because this matches by

-       expressions, it will *not* match the leading C{"1:1"} in C{"1:10"};

-       the expressions are evaluated first, and then compared, so

-       C{"1"} is compared with C{"10"}.

-       Do *not* use with packrat parsing enabled.

-    """

-    rep = Forward()

-    e2 = expr.copy()

-    rep << e2

-    def copyTokenToRepeater(s,l,t):

-        matchTokens = _flatten(t.asList())

-        def mustMatchTheseTokens(s,l,t):

-            theseTokens = _flatten(t.asList())

-            if  theseTokens != matchTokens:

-                raise ParseException("",0,"")

-        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )

-    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)

-    return rep

-

-def _escapeRegexRangeChars(s):

-    #~  escape these chars: ^-]

-    for c in r"\^-]":

-        s = s.replace(c,_bslash+c)

-    s = s.replace("\n",r"\n")

-    s = s.replace("\t",r"\t")

-    return _ustr(s)

-

-def oneOf( strs, caseless=False, useRegex=True ):

-    """Helper to quickly define a set of alternative Literals, and makes sure to do

-       longest-first testing when there is a conflict, regardless of the input order,

-       but returns a C{MatchFirst} for best performance.

-

-       Parameters:

-        - strs - a string of space-delimited literals, or a list of string literals

-        - caseless - (default=False) - treat all literals as caseless

-        - useRegex - (default=True) - as an optimization, will generate a Regex

-          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or

-          if creating a C{Regex} raises an exception)

-    """

-    if caseless:

-        isequal = ( lambda a,b: a.upper() == b.upper() )

-        masks = ( lambda a,b: b.upper().startswith(a.upper()) )

-        parseElementClass = CaselessLiteral

-    else:

-        isequal = ( lambda a,b: a == b )

-        masks = ( lambda a,b: b.startswith(a) )

-        parseElementClass = Literal

-

-    if isinstance(strs,(list,tuple)):

-        symbols = list(strs[:])

-    elif isinstance(strs,basestring):

-        symbols = strs.split()

-    else:

-        warnings.warn("Invalid argument to oneOf, expected string or list",

-                SyntaxWarning, stacklevel=2)

-

-    i = 0

-    while i < len(symbols)-1:

-        cur = symbols[i]

-        for j,other in enumerate(symbols[i+1:]):

-            if ( isequal(other, cur) ):

-                del symbols[i+j+1]

-                break

-            elif ( masks(cur, other) ):

-                del symbols[i+j+1]

-                symbols.insert(i,other)

-                cur = other

-                break

-        else:

-            i += 1

-

-    if not caseless and useRegex:

-        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))

-        try:

-            if len(symbols)==len("".join(symbols)):

-                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )

-            else:

-                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )

-        except:

-            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",

-                    SyntaxWarning, stacklevel=2)

-

-

-    # last resort, just use MatchFirst

-    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )

-
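# Illustrative sketch of oneOf, showing the longest-first reordering (assumes
# pyparsing is importable):
from pyparsing import oneOf

comparison_op = oneOf("< > <= >= = !=")
print(comparison_op.parseString("<="))   # -> ['<='], not ['<'], despite the input order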

-def dictOf( key, value ):

-    """Helper to easily and clearly define a dictionary by specifying the respective patterns

-       for the key and value.  Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens

-       in the proper order.  The key pattern can include delimiting markers or punctuation,

-       as long as they are suppressed, thereby leaving the significant key text.  The value

-       pattern can include named results, so that the C{Dict} results can include named token

-       fields.

-    """

-    return Dict( ZeroOrMore( Group ( key + value ) ) )

-
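# Illustrative sketch of dictOf (assumes pyparsing is importable; the field names
# are examples only):
from pyparsing import Word, alphas, alphanums, Suppress, dictOf

key = Word(alphas) + Suppress(":")
value = Word(alphanums)
attrs = dictOf(key, value).parseString("shape: SQUARE color: BLUE size: 5")
print(attrs["color"])   # -> BLUE
print(attrs.size)       # -> 5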

-def originalTextFor(expr, asString=True):

-    """Helper to return the original, untokenized text for a given expression.  Useful to

-       restore the parsed fields of an HTML start tag into the raw tag text itself, or to

-       revert separate tokens with intervening whitespace back to the original matching

-       input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not

-       require the inspect module to chase up the call stack.  By default, returns a 

-       string containing the original parsed text.  

-       

-       If the optional C{asString} argument is passed as C{False}, then the return value is a 

-       C{ParseResults} containing any results names that were originally matched, and a 

-       single token containing the original matched text from the input string.  So if 

-       the expression passed to C{L{originalTextFor}} contains expressions with defined

-       results names, you must set C{asString} to C{False} if you want to preserve those

-       results name values."""

-    locMarker = Empty().setParseAction(lambda s,loc,t: loc)

-    endlocMarker = locMarker.copy()

-    endlocMarker.callPreparse = False

-    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")

-    if asString:

-        extractText = lambda s,l,t: s[t._original_start:t._original_end]

-    else:

-        def extractText(s,l,t):

-            del t[:]

-            t.insert(0, s[t._original_start:t._original_end])

-            del t["_original_start"]

-            del t["_original_end"]

-    matchExpr.setParseAction(extractText)

-    return matchExpr

-
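# Illustrative sketch of originalTextFor (assumes pyparsing is importable):
from pyparsing import Word, alphas, nums, originalTextFor

tagged = originalTextFor(Word(alphas) + Word(nums))
print(tagged.parseString("abc   123"))   # -> ['abc   123'], whitespace preserved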

-def ungroup(expr): 

-    """Helper to undo pyparsing's default grouping of And expressions, even

-       if all but one are non-empty."""

-    return TokenConverter(expr).setParseAction(lambda t:t[0])

-

-# convenience constants for positional expressions

-empty       = Empty().setName("empty")

-lineStart   = LineStart().setName("lineStart")

-lineEnd     = LineEnd().setName("lineEnd")

-stringStart = StringStart().setName("stringStart")

-stringEnd   = StringEnd().setName("stringEnd")

-

-_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])

-_printables_less_backslash = "".join([ c for c in printables if c not in  r"\]" ])

-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],16)))

-_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))

-_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)

-_charRange = Group(_singleChar + Suppress("-") + _singleChar)

-_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"

-

-_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)

-

-def srange(s):

-    r"""Helper to easily define string ranges for use in Word construction.  Borrows

-       syntax from regexp '[]' string range definitions::

-          srange("[0-9]")   -> "0123456789"

-          srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"

-          srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"

-       The input string must be enclosed in []'s, and the returned string is the expanded

-       character set joined into a single string.

-       The values enclosed in the []'s may be::

-          a single character

-          an escaped character with a leading backslash (such as \- or \])

-          an escaped hex character with a leading '\x' (\x21, which is a '!' character) 

-            (\0x## is also supported for backwards compatibility) 

-          an escaped octal character with a leading '\0' (\041, which is a '!' character)

-          a range of any of the above, separated by a dash ('a-z', etc.)

-          any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)

-    """

-    try:

-        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])

-    except:

-        return ""

-
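# Illustrative sketch of srange (assumes pyparsing is importable):
from pyparsing import srange, Word

hexchars = srange("[0-9a-fA-F]")
print(hexchars)              # -> 0123456789abcdefABCDEF
hexnumber = Word(hexchars)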

-def matchOnlyAtCol(n):

-    """Helper method for defining parse actions that require matching at a specific

-       column in the input text.

-    """

-    def verifyCol(strg,locn,toks):

-        if col(locn,strg) != n:

-            raise ParseException(strg,locn,"matched token not at column %d" % n)

-    return verifyCol

-

-def replaceWith(replStr):

-    """Helper method for common parse actions that simply return a literal value.  Especially

-       useful when used with C{transformString()}.

-    """

-    def _replFunc(*args):

-        return [replStr]

-    return _replFunc

-

-def removeQuotes(s,l,t):

-    """Helper parse action for removing quotation marks from parsed quoted strings.

-       To use, add this parse action to a quoted string using::

-         quotedString.setParseAction( removeQuotes )

-    """

-    return t[0][1:-1]

-

-def upcaseTokens(s,l,t):

-    """Helper parse action to convert tokens to upper case."""

-    return [ tt.upper() for tt in map(_ustr,t) ]

-

-def downcaseTokens(s,l,t):

-    """Helper parse action to convert tokens to lower case."""

-    return [ tt.lower() for tt in map(_ustr,t) ]

-

-def keepOriginalText(s,startLoc,t):

-    """DEPRECATED - use new helper method C{originalTextFor}.

-       Helper parse action to preserve original parsed text,

-       overriding any nested parse actions."""

-    try:

-        endloc = getTokensEndLoc()

-    except ParseException:

-        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")

-    del t[:]

-    t += ParseResults(s[startLoc:endloc])

-    return t

-

-def getTokensEndLoc():

-    """Method to be called from within a parse action to determine the end

-       location of the parsed tokens."""

-    import inspect

-    fstack = inspect.stack()

-    try:

-        # search up the stack (through intervening argument normalizers) for correct calling routine

-        for f in fstack[2:]:

-            if f[3] == "_parseNoCache":

-                endloc = f[0].f_locals["loc"]

-                return endloc

-        else:

-            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")

-    finally:

-        del fstack

-

-def _makeTags(tagStr, xml):

-    """Internal helper to construct opening and closing tag expressions, given a tag name"""

-    if isinstance(tagStr,basestring):

-        resname = tagStr

-        tagStr = Keyword(tagStr, caseless=not xml)

-    else:

-        resname = tagStr.name

-

-    tagAttrName = Word(alphas,alphanums+"_-:")

-    if (xml):

-        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )

-        openTag = Suppress("<") + tagStr("tag") + \

-                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \

-                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")

-    else:

-        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )

-        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)

-        openTag = Suppress("<") + tagStr("tag") + \

-                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \

-                Optional( Suppress("=") + tagAttrValue ) ))) + \

-                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")

-    closeTag = Combine(_L("</") + tagStr + ">")

-

-    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)

-    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)

-    openTag.tag = resname

-    closeTag.tag = resname

-    return openTag, closeTag

-

-def makeHTMLTags(tagStr):

-    """Helper to construct opening and closing tag expressions for HTML, given a tag name"""

-    return _makeTags( tagStr, False )

-

-def makeXMLTags(tagStr):

-    """Helper to construct opening and closing tag expressions for XML, given a tag name"""

-    return _makeTags( tagStr, True )

-

-def withAttribute(*args,**attrDict):

-    """Helper to create a validating parse action to be used with start tags created

-       with C{makeXMLTags} or C{makeHTMLTags}. Use C{withAttribute} to qualify a starting tag

-       with a required attribute value, to avoid false matches on common tags such as

-       C{<TD>} or C{<DIV>}.

-

-       Call C{withAttribute} with a series of attribute names and values. Specify the list

-       of filter attribute names and values as:

-        - keyword arguments, as in C{(align="right")}, or

-        - as an explicit dict with C{**} operator, when an attribute name is also a Python

-          reserved word, as in C{**{"class":"Customer", "align":"right"}}

-        - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )

-       For attribute names with a namespace prefix, you must use the second form.  Attribute

-       names are matched insensitive to upper/lower case.

-

-       To verify that the attribute exists, but without specifying a value, pass

-       C{withAttribute.ANY_VALUE} as the value.

-       """

-    if args:

-        attrs = args[:]

-    else:

-        attrs = attrDict.items()

-    attrs = [(k,v) for k,v in attrs]

-    def pa(s,l,tokens):

-        for attrName,attrValue in attrs:

-            if attrName not in tokens:

-                raise ParseException(s,l,"no matching attribute " + attrName)

-            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:

-                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %

-                                            (attrName, tokens[attrName], attrValue))

-    return pa

-withAttribute.ANY_VALUE = object()

-
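# Illustrative sketch of withAttribute (assumes pyparsing is importable; the tag
# and attribute values are examples only):
from pyparsing import makeHTMLTags, withAttribute

div_start, div_end = makeHTMLTags("div")
div_start.setParseAction(withAttribute(type="grid"))
matches = div_start.searchString('<div type="grid"> <div type="graph">')
print(len(matches))   # -> 1, only the type="grid" start tag passes the filter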

-opAssoc = _Constants()

-opAssoc.LEFT = object()

-opAssoc.RIGHT = object()

-

-def operatorPrecedence( baseExpr, opList ):

-    """Helper method for constructing grammars of expressions made up of

-       operators working in a precedence hierarchy.  Operators may be unary or

-       binary, left- or right-associative.  Parse actions can also be attached

-       to operator expressions.

-

-       Parameters:

-        - baseExpr - expression representing the most basic element for the nested expression

-        - opList - list of tuples, one for each operator precedence level in the

-          expression grammar; each tuple is of the form

-          (opExpr, numTerms, rightLeftAssoc, parseAction), where:

-           - opExpr is the pyparsing expression for the operator;

-              may also be a string, which will be converted to a Literal;

-              if numTerms is 3, opExpr is a tuple of two expressions, for the

-              two operators separating the 3 terms

-           - numTerms is the number of terms for this operator (must

-              be 1, 2, or 3)

-           - rightLeftAssoc indicates whether the operator is

-              right or left associative, using the pyparsing-defined

-              constants opAssoc.RIGHT and opAssoc.LEFT.

-           - parseAction is the parse action to be associated with

-              expressions matching this operator expression (the

-              parse action tuple member may be omitted)

-    """

-    ret = Forward()

-    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )

-    for i,operDef in enumerate(opList):

-        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]

-        if arity == 3:

-            if opExpr is None or len(opExpr) != 2:

-                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")

-            opExpr1, opExpr2 = opExpr

-        thisExpr = Forward()#.setName("expr%d" % i)

-        if rightLeftAssoc == opAssoc.LEFT:

-            if arity == 1:

-                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )

-            elif arity == 2:

-                if opExpr is not None:

-                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )

-                else:

-                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )

-            elif arity == 3:

-                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \

-                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )

-            else:

-                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")

-        elif rightLeftAssoc == opAssoc.RIGHT:

-            if arity == 1:

-                # try to avoid LR with this extra test

-                if not isinstance(opExpr, Optional):

-                    opExpr = Optional(opExpr)

-                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )

-            elif arity == 2:

-                if opExpr is not None:

-                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )

-                else:

-                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )

-            elif arity == 3:

-                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \

-                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )

-            else:

-                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")

-        else:

-            raise ValueError("operator must indicate right or left associativity")

-        if pa:

-            matchExpr.setParseAction( pa )

-        thisExpr << ( matchExpr | lastExpr )

-        lastExpr = thisExpr

-    ret << lastExpr

-    return ret

-
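# Illustrative sketch of operatorPrecedence, building a small arithmetic grammar
# (assumes pyparsing is importable):
from pyparsing import Word, nums, oneOf, opAssoc, operatorPrecedence

integer = Word(nums).setParseAction(lambda t: int(t[0]))
arith = operatorPrecedence(integer, [
    (oneOf("* /"), 2, opAssoc.LEFT),
    (oneOf("+ -"), 2, opAssoc.LEFT),
])
print(arith.parseString("9 + 2 * 3"))   # -> [[9, '+', [2, '*', 3]]]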

-dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")

-sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")

-quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")

-unicodeString = Combine(_L('u') + quotedString.copy())

-

-def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):

-    """Helper method for defining nested lists enclosed in opening and closing

-       delimiters ("(" and ")" are the default).

-

-       Parameters:

-        - opener - opening character for a nested list (default="("); can also be a pyparsing expression

-        - closer - closing character for a nested list (default=")"); can also be a pyparsing expression

-        - content - expression for items within the nested lists (default=None)

-        - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)

-

-       If an expression is not provided for the content argument, the nested

-       expression will capture all whitespace-delimited content between delimiters

-       as a list of separate values.

-

-       Use the C{ignoreExpr} argument to define expressions that may contain

-       opening or closing characters that should not be treated as opening

-       or closing characters for nesting, such as quotedString or a comment

-       expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.

-       The default is L{quotedString}, but if no expressions are to be ignored,

-       then pass C{None} for this argument.

-    """

-    if opener == closer:

-        raise ValueError("opening and closing strings cannot be the same")

-    if content is None:

-        if isinstance(opener,basestring) and isinstance(closer,basestring):

-            if len(opener) == 1 and len(closer)==1:

-                if ignoreExpr is not None:

-                    content = (Combine(OneOrMore(~ignoreExpr +

-                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))

-                                ).setParseAction(lambda t:t[0].strip()))

-                else:

-                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS

-                                ).setParseAction(lambda t:t[0].strip()))

-            else:

-                if ignoreExpr is not None:

-                    content = (Combine(OneOrMore(~ignoreExpr + 

-                                    ~Literal(opener) + ~Literal(closer) +

-                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))

-                                ).setParseAction(lambda t:t[0].strip()))

-                else:

-                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +

-                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))

-                                ).setParseAction(lambda t:t[0].strip()))

-        else:

-            raise ValueError("opening and closing arguments must be strings if no content expression is given")

-    ret = Forward()

-    if ignoreExpr is not None:

-        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )

-    else:

-        ret << Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )

-    return ret

-
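# Illustrative sketch of nestedExpr with the default "(" and ")" delimiters
# (assumes pyparsing is importable):
from pyparsing import nestedExpr

print(nestedExpr().parseString("(a (b c) d)"))   # -> [['a', ['b', 'c'], 'd']]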

-def indentedBlock(blockStatementExpr, indentStack, indent=True):

-    """Helper method for defining space-delimited indentation blocks, such as

-       those used to define block statements in Python source code.

-

-       Parameters:

-        - blockStatementExpr - expression defining syntax of statement that

-            is repeated within the indented block

-        - indentStack - list created by caller to manage indentation stack

-            (multiple statementWithIndentedBlock expressions within a single grammar

-            should share a common indentStack)

-        - indent - boolean indicating whether block must be indented beyond the

-            current level; set to False for block of left-most statements

-            (default=True)

-

-       A valid block must contain at least one C{blockStatement}.

-    """

-    def checkPeerIndent(s,l,t):

-        if l >= len(s): return

-        curCol = col(l,s)

-        if curCol != indentStack[-1]:

-            if curCol > indentStack[-1]:

-                raise ParseFatalException(s,l,"illegal nesting")

-            raise ParseException(s,l,"not a peer entry")

-

-    def checkSubIndent(s,l,t):

-        curCol = col(l,s)

-        if curCol > indentStack[-1]:

-            indentStack.append( curCol )

-        else:

-            raise ParseException(s,l,"not a subentry")

-

-    def checkUnindent(s,l,t):

-        if l >= len(s): return

-        curCol = col(l,s)

-        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):

-            raise ParseException(s,l,"not an unindent")

-        indentStack.pop()

-

-    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())

-    INDENT = Empty() + Empty().setParseAction(checkSubIndent)

-    PEER   = Empty().setParseAction(checkPeerIndent)

-    UNDENT = Empty().setParseAction(checkUnindent)

-    if indent:

-        smExpr = Group( Optional(NL) +

-            #~ FollowedBy(blockStatementExpr) +

-            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)

-    else:

-        smExpr = Group( Optional(NL) +

-            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )

-    blockStatementExpr.ignore(_bslash + LineEnd())

-    return smExpr

-

-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")

-punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")

-

-anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))

-commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()

-_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))

-replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None

-

-# it's easy to get these comment structures wrong - they're very common, so may as well make them available

-cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")

-

-htmlComment = Regex(r"<!--[\s\S]*?-->")

-restOfLine = Regex(r".*").leaveWhitespace()

-dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")

-cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")

-

-javaStyleComment = cppStyleComment

-pythonStyleComment = Regex(r"#.*").setName("Python style comment")

-_noncomma = "".join( [ c for c in printables if c != "," ] )

-_commasepitem = Combine(OneOrMore(Word(_noncomma) +

-                                  Optional( Word(" \t") +

-                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")

-commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")

-

-

-if __name__ == "__main__":

-

-    def test( teststring ):

-        try:

-            tokens = simpleSQL.parseString( teststring )

-            tokenlist = tokens.asList()

-            print (teststring + "->"   + str(tokenlist))

-            print ("tokens = "         + str(tokens))

-            print ("tokens.columns = " + str(tokens.columns))

-            print ("tokens.tables = "  + str(tokens.tables))

-            print (tokens.asXML("SQL",True))

-        except ParseBaseException:

-            err = sys.exc_info()[1]

-            print (teststring + "->")

-            print (err.line)

-            print (" "*(err.column-1) + "^")

-            print (err)

-        print()

-

-    selectToken    = CaselessLiteral( "select" )

-    fromToken      = CaselessLiteral( "from" )

-

-    ident          = Word( alphas, alphanums + "_$" )

-    columnName     = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )

-    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")

-    tableName      = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )

-    tableNameList  = Group( delimitedList( tableName ) )#.setName("tables")

-    simpleSQL      = ( selectToken + \

-                     ( '*' | columnNameList ).setResultsName( "columns" ) + \

-                     fromToken + \

-                     tableNameList.setResultsName( "tables" ) )

-

-    test( "SELECT * from XYZZY, ABC" )

-    test( "select * from SYS.XYZZY" )

-    test( "Select A from Sys.dual" )

-    test( "Select AA,BB,CC from Sys.dual" )

-    test( "Select A, B, C from Sys.dual" )

-    test( "Select A, B, C from Sys.dual" )

-    test( "Xelect A, B, C from Sys.dual" )

-    test( "Select A, B, C frox Sys.dual" )

-    test( "Select" )

-    test( "Select ^^^ frox Sys.dual" )

-    test( "Select A, B, C from Sys.dual, Table2   " )

+# module pyparsing.py
+#
+# Copyright (c) 2003-2011  Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+#from __future__ import generators
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+
+The pyparsing module is an alternative approach to creating and executing simple grammars,
+vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
+provides a library of classes that you use to construct the grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
+
+    from pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word( alphas ) + "," + Word( alphas ) + "!"
+
+    hello = "Hello, World!"
+    print hello, "->", greet.parseString( hello )
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the self-explanatory
+class names, and the use of '+', '|' and '^' operators.
+
+The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
+object with named attributes.
+
+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
+ - quoted strings
+ - embedded comments
+"""
+
+__version__ = "1.5.6"
+__versionTime__ = "26 June 2011 10:53"
+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
+'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor',
+]
+
+"""
+Detect if we are running version 3.X and make appropriate changes
+Robert A. Clark
+"""
+_PY3K = sys.version_info[0] > 2
+if _PY3K:
+    _MAX_INT = sys.maxsize
+    basestring = str
+    unichr = chr
+    _ustr = str
+    alphas = string.ascii_lowercase + string.ascii_uppercase
+else:
+    _MAX_INT = sys.maxint
+    range = xrange
+    set = lambda s : dict( [(c,0) for c in s] )
+    alphas = string.lowercase + string.uppercase
+
+    def _ustr(obj):
+        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
+           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
+           then < returns the unicode object | encodes it with the default encoding | ... >.
+        """
+        if isinstance(obj,unicode):
+            return obj
+
+        try:
+            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+            # it won't break any existing code.
+            return str(obj)
+
+        except UnicodeEncodeError:
+            # The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
+            # state that "The return value must be a string object". However, does a
+            # unicode object (being a subclass of basestring) count as a "string
+            # object"?
+            # If so, then return a unicode object:
+            return unicode(obj)
+            # Else encode it... but how? There are many choices... :)
+            # Replace unprintables with escape codes?
+            #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
+            # Replace unprintables with question marks?
+            #return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
+            # ...
+
+    alphas = string.lowercase + string.uppercase
+
+# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+singleArgBuiltins = []
+import __builtin__
+for fname in "sum len enumerate sorted reversed list tuple set any all".split():
+    try:
+        singleArgBuiltins.append(getattr(__builtin__,fname))
+    except AttributeError:
+        continue
+
+def _xml_escape(data):
+    """Escape &, <, >, ", ', etc. in a string of data."""
+
+    # ampersand must be replaced first
+    from_symbols = '&><"\''
+    to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
+    for from_,to_ in zip(from_symbols, to_symbols):
+        data = data.replace(from_, to_)
+    return data
+
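# Rough illustration of the internal _xml_escape helper (assumes it can be
# imported directly from this module; it is not part of __all__):
from pyparsing import _xml_escape
print(_xml_escape('a < b & "c"'))   # -> a &lt; b &amp; &quot;c&quot;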
+class _Constants(object):
+    pass
+
+nums       = string.digits
+hexnums    = nums + "ABCDEFabcdef"
+alphanums  = alphas + nums
+_bslash    = chr(92)
+printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
+
+class ParseBaseException(Exception):
+    """base exception class for all parsing runtime exceptions"""
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, pstr, loc=0, msg=None, elem=None ):
+        self.loc = loc
+        if msg is None:
+            self.msg = pstr
+            self.pstr = ""
+        else:
+            self.msg = msg
+            self.pstr = pstr
+        self.parserElement = elem
+
+    def __getattr__( self, aname ):
+        """supported attributes by name are:
+            - lineno - returns the line number of the exception text
+            - col - returns the column number of the exception text
+            - line - returns the line containing the exception text
+        """
+        if( aname == "lineno" ):
+            return lineno( self.loc, self.pstr )
+        elif( aname in ("col", "column") ):
+            return col( self.loc, self.pstr )
+        elif( aname == "line" ):
+            return line( self.loc, self.pstr )
+        else:
+            raise AttributeError(aname)
+
+    def __str__( self ):
+        return "%s (at char %d), (line:%d, col:%d)" % \
+                ( self.msg, self.loc, self.lineno, self.column )
+    def __repr__( self ):
+        return _ustr(self)
+    def markInputline( self, markerString = ">!<" ):
+        """Extracts the exception line from the input string, and marks
+           the location of the exception with a special symbol.
+        """
+        line_str = self.line
+        line_column = self.column - 1
+        if markerString:
+            line_str = "".join( [line_str[:line_column],
+                                markerString, line_str[line_column:]])
+        return line_str.strip()
+    def __dir__(self):
+        return "loc msg pstr parserElement lineno col line " \
+               "markInputLine __str__ __repr__".split()
+
+class ParseException(ParseBaseException):
+    """exception thrown when parse expressions don't match class;
+       supported attributes by name are:
+        - lineno - returns the line number of the exception text
+        - col - returns the column number of the exception text
+        - line - returns the line containing the exception text
+    """
+    pass
+
+class ParseFatalException(ParseBaseException):
+    """user-throwable exception thrown when inconsistent parse content
+       is found; stops all parsing immediately"""
+    pass
+
+class ParseSyntaxException(ParseFatalException):
+    """just like C{ParseFatalException}, but thrown internally when an
+       C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because
+       an unbacktrackable syntax error has been found"""
+    def __init__(self, pe):
+        super(ParseSyntaxException, self).__init__(
+                                    pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+#~ class ReparseException(ParseBaseException):
+    #~ """Experimental class - parse actions can raise this exception to cause
+       #~ pyparsing to reparse the input string:
+        #~ - with a modified input string, and/or
+        #~ - with a modified start location
+       #~ Set the values of the ReparseException in the constructor, and raise the
+       #~ exception in a parse action to cause pyparsing to use the new string/location.
+       #~ Setting the values as None causes no change to be made.
+       #~ """
+    #~ def __init_( self, newstring, restartLoc ):
+        #~ self.newParseText = newstring
+        #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+    """exception thrown by C{validate()} if the grammar could be improperly recursive"""
+    def __init__( self, parseElementList ):
+        self.parseElementTrace = parseElementList
+
+    def __str__( self ):
+        return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+    def __init__(self,p1,p2):
+        self.tup = (p1,p2)
+    def __getitem__(self,i):
+        return self.tup[i]
+    def __repr__(self):
+        return repr(self.tup)
+    def setOffset(self,i):
+        self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+    """Structured parse results, to provide multiple means of access to the parsed data:
+       - as a list (C{len(results)})
+       - by list index (C{results[0], results[1]}, etc.)
+       - by attribute (C{results.<resultsName>})
+       """
+    #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
+    def __new__(cls, toklist, name=None, asList=True, modal=True ):
+        if isinstance(toklist, cls):
+            return toklist
+        retobj = object.__new__(cls)
+        retobj.__doinit = True
+        return retobj
+
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
+        if self.__doinit:
+            self.__doinit = False
+            self.__name = None
+            self.__parent = None
+            self.__accumNames = {}
+            if isinstance(toklist, list):
+                self.__toklist = toklist[:]
+            else:
+                self.__toklist = [toklist]
+            self.__tokdict = dict()
+
+        if name is not None and name:
+            if not modal:
+                self.__accumNames[name] = 0
+            if isinstance(name,int):
+                name = _ustr(name) # will always return a str, but use _ustr for consistency
+            self.__name = name
+            if not toklist in (None,'',[]):
+                if isinstance(toklist,basestring):
+                    toklist = [ toklist ]
+                if asList:
+                    if isinstance(toklist,ParseResults):
+                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
+                    else:
+                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+                    self[name].__name = name
+                else:
+                    try:
+                        self[name] = toklist[0]
+                    except (KeyError,TypeError,IndexError):
+                        self[name] = toklist
+
+    def __getitem__( self, i ):
+        if isinstance( i, (int,slice) ):
+            return self.__toklist[i]
+        else:
+            if i not in self.__accumNames:
+                return self.__tokdict[i][-1][0]
+            else:
+                return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+    def __setitem__( self, k, v, isinstance=isinstance ):
+        if isinstance(v,_ParseResultsWithOffset):
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+            sub = v[0]
+        elif isinstance(k,int):
+            self.__toklist[k] = v
+            sub = v
+        else:
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+            sub = v
+        if isinstance(sub,ParseResults):
+            sub.__parent = wkref(self)
+
+    def __delitem__( self, i ):
+        if isinstance(i,(int,slice)):
+            mylen = len( self.__toklist )
+            del self.__toklist[i]
+
+            # convert int to slice
+            if isinstance(i, int):
+                if i < 0:
+                    i += mylen
+                i = slice(i, i+1)
+            # get removed indices
+            removed = list(range(*i.indices(mylen)))
+            removed.reverse()
+            # fixup indices in token dictionary
+            for name in self.__tokdict:
+                occurrences = self.__tokdict[name]
+                for j in removed:
+                    for k, (value, position) in enumerate(occurrences):
+                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+        else:
+            del self.__tokdict[i]
+
+    def __contains__( self, k ):
+        return k in self.__tokdict
+
+    def __len__( self ): return len( self.__toklist )
+    def __bool__(self): return len( self.__toklist ) > 0
+    __nonzero__ = __bool__
+    def __iter__( self ): return iter( self.__toklist )
+    def __reversed__( self ): return iter( self.__toklist[::-1] )
+    def keys( self ):
+        """Returns all named result keys."""
+        return self.__tokdict.keys()
+
+    def pop( self, index=-1 ):
+        """Removes and returns item at specified index (default=last).
+           Will work with either numeric indices or dict-key indices."""
+        ret = self[index]
+        del self[index]
+        return ret
+
+    def get(self, key, defaultValue=None):
+        """Returns named result matching the given key, or if there is no
+           such name, then returns the given C{defaultValue} or C{None} if no
+           C{defaultValue} is specified."""
+        if key in self:
+            return self[key]
+        else:
+            return defaultValue
+
+    def insert( self, index, insStr ):
+        """Inserts new element at location index in the list of parsed tokens."""
+        self.__toklist.insert(index, insStr)
+        # fixup indices in token dictionary
+        for name in self.__tokdict:
+            occurrences = self.__tokdict[name]
+            for k, (value, position) in enumerate(occurrences):
+                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+    def items( self ):
+        """Returns all named result keys and values as a list of tuples."""
+        return [(k,self[k]) for k in self.__tokdict]
+
+    def values( self ):
+        """Returns all named result values."""
+        return [ v[-1][0] for v in self.__tokdict.values() ]
+
+    def __getattr__( self, name ):
+        if True: #name not in self.__slots__:
+            if name in self.__tokdict:
+                if name not in self.__accumNames:
+                    return self.__tokdict[name][-1][0]
+                else:
+                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
+            else:
+                return ""
+        return None
+
+    def __add__( self, other ):
+        ret = self.copy()
+        ret += other
+        return ret
+
+    def __iadd__( self, other ):
+        if other.__tokdict:
+            offset = len(self.__toklist)
+            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
+            otheritems = other.__tokdict.items()
+            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+                                for (k,vlist) in otheritems for v in vlist]
+            for k,v in otherdictitems:
+                self[k] = v
+                if isinstance(v[0],ParseResults):
+                    v[0].__parent = wkref(self)
+
+        self.__toklist += other.__toklist
+        self.__accumNames.update( other.__accumNames )
+        return self
+
+    def __radd__(self, other):
+        if isinstance(other,int) and other == 0:
+            return self.copy()
+
+    def __repr__( self ):
+        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+    def __str__( self ):
+        out = "["
+        sep = ""
+        for i in self.__toklist:
+            if isinstance(i, ParseResults):
+                out += sep + _ustr(i)
+            else:
+                out += sep + repr(i)
+            sep = ", "
+        out += "]"
+        return out
+
+    def _asStringList( self, sep='' ):
+        out = []
+        for item in self.__toklist:
+            if out and sep:
+                out.append(sep)
+            if isinstance( item, ParseResults ):
+                out += item._asStringList()
+            else:
+                out.append( _ustr(item) )
+        return out
+
+    def asList( self ):
+        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
+        out = []
+        for res in self.__toklist:
+            if isinstance(res,ParseResults):
+                out.append( res.asList() )
+            else:
+                out.append( res )
+        return out
+
+    def asDict( self ):
+        """Returns the named parse results as dictionary."""
+        return dict( self.items() )
+
+    def copy( self ):
+        """Returns a new copy of a C{ParseResults} object."""
+        ret = ParseResults( self.__toklist )
+        ret.__tokdict = self.__tokdict.copy()
+        ret.__parent = self.__parent
+        ret.__accumNames.update( self.__accumNames )
+        ret.__name = self.__name
+        return ret
+
+    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
+        nl = "\n"
+        out = []
+        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
+                                                            for v in vlist ] )
+        nextLevelIndent = indent + "  "
+
+        # collapse out indents if formatting is not desired
+        if not formatted:
+            indent = ""
+            nextLevelIndent = ""
+            nl = ""
+
+        selfTag = None
+        if doctag is not None:
+            selfTag = doctag
+        else:
+            if self.__name:
+                selfTag = self.__name
+
+        if not selfTag:
+            if namedItemsOnly:
+                return ""
+            else:
+                selfTag = "ITEM"
+
+        out += [ nl, indent, "<", selfTag, ">" ]
+
+        worklist = self.__toklist
+        for i,res in enumerate(worklist):
+            if isinstance(res,ParseResults):
+                if i in namedItems:
+                    out += [ res.asXML(namedItems[i],
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+                else:
+                    out += [ res.asXML(None,
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+            else:
+                # individual token, see if there is a name for it
+                resTag = None
+                if i in namedItems:
+                    resTag = namedItems[i]
+                if not resTag:
+                    if namedItemsOnly:
+                        continue
+                    else:
+                        resTag = "ITEM"
+                xmlBodyText = _xml_escape(_ustr(res))
+                out += [ nl, nextLevelIndent, "<", resTag, ">",
+                                                xmlBodyText,
+                                                "</", resTag, ">" ]
+
+        out += [ nl, indent, "</", selfTag, ">" ]
+        return "".join(out)
+
+    def __lookup(self,sub):
+        for k,vlist in self.__tokdict.items():
+            for v,loc in vlist:
+                if sub is v:
+                    return k
+        return None
+
+    def getName(self):
+        """Returns the results name for this token expression."""
+        if self.__name:
+            return self.__name
+        elif self.__parent:
+            par = self.__parent()
+            if par:
+                return par.__lookup(self)
+            else:
+                return None
+        elif (len(self) == 1 and
+               len(self.__tokdict) == 1 and
+               self.__tokdict.values()[0][0][1] in (0,-1)):
+            return self.__tokdict.keys()[0]
+        else:
+            return None
+
+    def dump(self,indent='',depth=0):
+        """Diagnostic method for listing out the contents of a C{ParseResults}.
+           Accepts an optional C{indent} argument so that this string can be embedded
+           in a nested display of other data."""
+        out = []
+        out.append( indent+_ustr(self.asList()) )
+        keys = self.items()
+        keys.sort()
+        for k,v in keys:
+            if out:
+                out.append('\n')
+            out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
+            if isinstance(v,ParseResults):
+                if v.keys():
+                    out.append( v.dump(indent,depth+1) )
+                else:
+                    out.append(_ustr(v))
+            else:
+                out.append(_ustr(v))
+        return "".join(out)
+
+    # add support for pickle protocol
+    def __getstate__(self):
+        return ( self.__toklist,
+                 ( self.__tokdict.copy(),
+                   self.__parent is not None and self.__parent() or None,
+                   self.__accumNames,
+                   self.__name ) )
+
+    def __setstate__(self,state):
+        self.__toklist = state[0]
+        (self.__tokdict,
+         par,
+         inAccumNames,
+         self.__name) = state[1]
+        self.__accumNames = {}
+        self.__accumNames.update(inAccumNames)
+        if par is not None:
+            self.__parent = wkref(par)
+        else:
+            self.__parent = None
+
+    def __dir__(self):
+        return dir(super(ParseResults,self)) + self.keys()
+
+def col (loc,strg):
+    """Returns current column within a string, counting newlines as line separators.
+   The first column is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing <TAB>s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+    """Returns current line number within a string, counting newlines as line separators.
+   The first line is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing <TAB>s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+    """Returns the line of text containing loc within a string, counting newlines as line separators.
+       """
+    lastCR = strg.rfind("\n", 0, loc)
+    nextCR = strg.find("\n", loc)
+    if nextCR >= 0:
+        return strg[lastCR+1:nextCR]
+    else:
+        return strg[lastCR+1:]
+
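+# Editor's illustrative sketch, not part of the original pyparsing source,
+# showing how the col/lineno/line helpers above relate to a 0-based location:
+#
+#   data = "abc\ndefg"
+#   lineno(5, data)   # -> 2       (lines are numbered from 1)
+#   col(5, data)      # -> 2       ('e' is the second column of its line)
+#   line(5, data)     # -> 'defg'
+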
+def _defaultStartDebugAction( instring, loc, expr ):
+    print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+    print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+    """'Do-nothing' debug action, to suppress debugging output during parsing."""
+    pass
+
+'decorator to trim function calls to match the arity of the target'
+if not _PY3K:
+    def _trim_arity(func, maxargs=2):
+        limit = [0]
+        def wrapper(*args):
+            while 1:
+                try:
+                    return func(*args[limit[0]:])
+                except TypeError:
+                    if limit[0] <= maxargs:
+                        limit[0] += 1
+                        continue
+                    raise
+        return wrapper
+else:
+    def _trim_arity(func, maxargs=2):
+        limit = maxargs
+        def wrapper(*args):
+            #~ nonlocal limit
+            while 1:
+                try:
+                    return func(*args[limit:])
+                except TypeError:
+                    if limit:
+                        limit -= 1
+                        continue
+                    raise
+        return wrapper
+
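+# Editor's illustrative sketch, not part of the original pyparsing source:
+# _trim_arity lets parse actions be written with any subset of the
+# (s, loc, toks) arguments; the wrapper retries with fewer leading arguments
+# until the callable accepts the call.
+#
+#   def upcase(toks):
+#       return [t.upper() for t in toks]
+#   wrapped = _trim_arity(upcase)
+#   wrapped("instring", 0, ["abc"])   # -> ['ABC']
+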
+class ParserElement(object):
+    """Abstract base level parser element class."""
+    DEFAULT_WHITE_CHARS = " \n\t\r"
+    verbose_stacktrace = False
+
+    def setDefaultWhitespaceChars( chars ):
+        """Overrides the default whitespace chars
+        """
+        ParserElement.DEFAULT_WHITE_CHARS = chars
+    setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
+
+    def __init__( self, savelist=False ):
+        self.parseAction = list()
+        self.failAction = None
+        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
+        self.strRepr = None
+        self.resultsName = None
+        self.saveAsList = savelist
+        self.skipWhitespace = True
+        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        self.copyDefaultWhiteChars = True
+        self.mayReturnEmpty = False # used when checking for left-recursion
+        self.keepTabs = False
+        self.ignoreExprs = list()
+        self.debug = False
+        self.streamlined = False
+        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+        self.errmsg = ""
+        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+        self.debugActions = ( None, None, None ) #custom debug actions
+        self.re = None
+        self.callPreparse = True # used to avoid redundant calls to preParse
+        self.callDuringTry = False
+
+    def copy( self ):
+        """Make a copy of this C{ParserElement}.  Useful for defining different parse actions
+           for the same parsing pattern, using copies of the original parse element."""
+        cpy = copy.copy( self )
+        cpy.parseAction = self.parseAction[:]
+        cpy.ignoreExprs = self.ignoreExprs[:]
+        if self.copyDefaultWhiteChars:
+            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        return cpy
+
+    def setName( self, name ):
+        """Define name for this expression, for use in debugging."""
+        self.name = name
+        self.errmsg = "Expected " + self.name
+        if hasattr(self,"exception"):
+            self.exception.msg = self.errmsg
+        return self
+
+    def setResultsName( self, name, listAllMatches=False ):
+        """Define name for referencing matching tokens as a nested attribute
+           of the returned parse results.
+           NOTE: this returns a *copy* of the original C{ParserElement} object;
+           this is so that the client can define a basic element, such as an
+           integer, and reference it in multiple places with different names.
+
+           You can also set results names using the abbreviated syntax,
+           C{expr("name")} in place of C{expr.setResultsName("name")} -
+           see L{I{__call__}<__call__>}.
+        """
+        newself = self.copy()
+        if name.endswith("*"):
+            name = name[:-1]
+            listAllMatches=True
+        newself.resultsName = name
+        newself.modalResults = not listAllMatches
+        return newself
+
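+    # Editor's illustrative sketch, not part of the original pyparsing source;
+    # the two definitions below are equivalent, the second using the abbreviated
+    # expr("name") syntax (see __call__).  Word/alphas/nums are helpers defined
+    # elsewhere in this module.
+    #
+    #   userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("ssn")
+    #   userdata = Word(alphas)("name") + Word(nums+"-")("ssn")
+    #   userdata.parseString("john 123-45-6789")["ssn"]   # -> '123-45-6789'
+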
+    def setBreak(self,breakFlag = True):
+        """Method to invoke the Python pdb debugger when this element is
+           about to be parsed. Set C{breakFlag} to True to enable, False to
+           disable.
+        """
+        if breakFlag:
+            _parseMethod = self._parse
+            def breaker(instring, loc, doActions=True, callPreParse=True):
+                import pdb
+                pdb.set_trace()
+                return _parseMethod( instring, loc, doActions, callPreParse )
+            breaker._originalParseMethod = _parseMethod
+            self._parse = breaker
+        else:
+            if hasattr(self._parse,"_originalParseMethod"):
+                self._parse = self._parse._originalParseMethod
+        return self
+
+    def setParseAction( self, *fns, **kwargs ):
+        """Define action to perform when successfully matching parse element definition.
+           Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
+           C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
+            - s   = the original string being parsed (see note below)
+            - loc = the location of the matching substring
+            - toks = a list of the matched tokens, packaged as a ParseResults object
+           If the functions in fns modify the tokens, they can return them as the return
+           value from fn, and the modified list of tokens will replace the original.
+           Otherwise, fn does not need to return any value.
+
+           Note: the default parsing behavior is to expand tabs in the input string
+           before starting the parsing process.  See L{I{parseString}<parseString>} for more information
+           on parsing strings containing <TAB>s, and suggested methods to maintain a
+           consistent view of the parsed string, the parse location, and line and column
+           positions within the parsed string.
+           """
+        self.parseAction = list(map(_trim_arity, list(fns)))
+        self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
+        return self
+
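+    # Editor's illustrative sketch, not part of the original pyparsing source:
+    # a parse action using the single-argument (toks) form described above to
+    # convert matched text to int (Word/nums defined elsewhere in this module).
+    #
+    #   integer = Word(nums).setParseAction(lambda toks: [int(t) for t in toks])
+    #   integer.parseString("42")[0]   # -> 42 (an int, not the string '42')
+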
+    def addParseAction( self, *fns, **kwargs ):
+        """Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
+        self.parseAction += list(map(_trim_arity, list(fns)))
+        self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
+        return self
+
+    def setFailAction( self, fn ):
+        """Define action to perform if parsing fails at this expression.
+           Fail action fn is a callable function that takes the arguments
+           C{fn(s,loc,expr,err)} where:
+            - s = string being parsed
+            - loc = location where expression match was attempted and failed
+            - expr = the parse expression that failed
+            - err = the exception thrown
+           The function returns no value.  It may throw C{ParseFatalException}
+           if it is desired to stop parsing immediately."""
+        self.failAction = fn
+        return self
+
+    def _skipIgnorables( self, instring, loc ):
+        exprsFound = True
+        while exprsFound:
+            exprsFound = False
+            for e in self.ignoreExprs:
+                try:
+                    while 1:
+                        loc,dummy = e._parse( instring, loc )
+                        exprsFound = True
+                except ParseException:
+                    pass
+        return loc
+
+    def preParse( self, instring, loc ):
+        if self.ignoreExprs:
+            loc = self._skipIgnorables( instring, loc )
+
+        if self.skipWhitespace:
+            wt = self.whiteChars
+            instrlen = len(instring)
+            while loc < instrlen and instring[loc] in wt:
+                loc += 1
+
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        return loc, []
+
+    def postParse( self, instring, loc, tokenlist ):
+        return tokenlist
+
+    #~ @profile
+    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
+        debugging = ( self.debug ) #and doActions )
+
+        if debugging or self.failAction:
+            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+            if (self.debugActions[0] ):
+                self.debugActions[0]( instring, loc, self )
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            try:
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            except ParseBaseException:
+                #~ print ("Exception raised:", err)
+                err = None
+                if self.debugActions[2]:
+                    err = sys.exc_info()[1]
+                    self.debugActions[2]( instring, tokensStart, self, err )
+                if self.failAction:
+                    if err is None:
+                        err = sys.exc_info()[1]
+                    self.failAction( instring, tokensStart, self, err )
+                raise
+        else:
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            if self.mayIndexError or loc >= len(instring):
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            else:
+                loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+        tokens = self.postParse( instring, loc, tokens )
+
+        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+        if self.parseAction and (doActions or self.callDuringTry):
+            if debugging:
+                try:
+                    for fn in self.parseAction:
+                        tokens = fn( instring, tokensStart, retTokens )
+                        if tokens is not None:
+                            retTokens = ParseResults( tokens,
+                                                      self.resultsName,
+                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                      modal=self.modalResults )
+                except ParseBaseException:
+                    #~ print "Exception raised in user parse action:", err
+                    if (self.debugActions[2] ):
+                        err = sys.exc_info()[1]
+                        self.debugActions[2]( instring, tokensStart, self, err )
+                    raise
+            else:
+                for fn in self.parseAction:
+                    tokens = fn( instring, tokensStart, retTokens )
+                    if tokens is not None:
+                        retTokens = ParseResults( tokens,
+                                                  self.resultsName,
+                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                  modal=self.modalResults )
+
+        if debugging:
+            #~ print ("Matched",self,"->",retTokens.asList())
+            if (self.debugActions[1] ):
+                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+        return loc, retTokens
+
+    def tryParse( self, instring, loc ):
+        try:
+            return self._parse( instring, loc, doActions=False )[0]
+        except ParseFatalException:
+            raise ParseException( instring, loc, self.errmsg, self)
+
+    # this method gets repeatedly called during backtracking with the same arguments -
+    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+        lookup = (self,instring,loc,callPreParse,doActions)
+        if lookup in ParserElement._exprArgCache:
+            value = ParserElement._exprArgCache[ lookup ]
+            if isinstance(value, Exception):
+                raise value
+            return (value[0],value[1].copy())
+        else:
+            try:
+                value = self._parseNoCache( instring, loc, doActions, callPreParse )
+                ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
+                return value
+            except ParseBaseException:
+                pe = sys.exc_info()[1]
+                ParserElement._exprArgCache[ lookup ] = pe
+                raise
+
+    _parse = _parseNoCache
+
+    # argument cache for optimizing repeated calls when backtracking through recursive expressions
+    _exprArgCache = {}
+    def resetCache():
+        ParserElement._exprArgCache.clear()
+    resetCache = staticmethod(resetCache)
+
+    _packratEnabled = False
+    def enablePackrat():
+        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+           Repeated parse attempts at the same string location (which happens
+           often in many complex grammars) can immediately return a cached value,
+           instead of re-executing parsing/validating code.  Memoizing is done for
+           both valid results and parsing exceptions.
+
+           This speedup may break existing programs that use parse actions that
+           have side-effects.  For this reason, packrat parsing is disabled when
+           you first import pyparsing.  To activate the packrat feature, your
+           program must call the class method C{ParserElement.enablePackrat()}.  If
+           your program uses C{psyco} to "compile as you go", you must call
+           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
+           Python will crash.  For best results, call C{enablePackrat()} immediately
+           after importing pyparsing.
+        """
+        if not ParserElement._packratEnabled:
+            ParserElement._packratEnabled = True
+            ParserElement._parse = ParserElement._parseCache
+    enablePackrat = staticmethod(enablePackrat)
+
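+    # Editor's illustrative sketch, not part of the original pyparsing source:
+    # packrat parsing is opt-in and is best enabled once, immediately after
+    # import and before any grammar is built or used.
+    #
+    #   from pyparsing import ParserElement   # or however this vendored module is imported
+    #   ParserElement.enablePackrat()
+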
+    def parseString( self, instring, parseAll=False ):
+        """Execute the parse expression with the given string.
+           This is the main interface to the client code, once the complete
+           expression has been built.
+
+           If you want the grammar to require that the entire input string be
+           successfully parsed, then set C{parseAll} to True (equivalent to ending
+           the grammar with C{StringEnd()}).
+
+           Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+           in order to report proper column numbers in parse actions.
+           If the input string contains tabs and
+           the grammar uses parse actions that use the C{loc} argument to index into the
+           string being parsed, you can ensure you have a consistent view of the input
+           string by:
+            - calling C{parseWithTabs} on your grammar before calling C{parseString}
+              (see L{I{parseWithTabs}<parseWithTabs>})
+            - define your parse action using the full C{(s,loc,toks)} signature, and
+              reference the input string using the parse action's C{s} argument
+            - explicitly expand the tabs in your input string before calling
+              C{parseString}
+        """
+        ParserElement.resetCache()
+        if not self.streamlined:
+            self.streamline()
+            #~ self.saveAsList = True
+        for e in self.ignoreExprs:
+            e.streamline()
+        if not self.keepTabs:
+            instring = instring.expandtabs()
+        try:
+            loc, tokens = self._parse( instring, 0 )
+            if parseAll:
+                loc = self.preParse( instring, loc )
+                se = Empty() + StringEnd()
+                se._parse( instring, loc )
+        except ParseBaseException:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                exc = sys.exc_info()[1]
+                raise exc
+        else:
+            return tokens
+
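+    # Editor's illustrative sketch, not part of the original pyparsing source:
+    # with parseAll=True the grammar must consume the entire input; otherwise
+    # trailing text is silently ignored (Word/alphas defined elsewhere in this module).
+    #
+    #   greet = Word(alphas) + "," + Word(alphas) + "!"
+    #   greet.parseString("Hello, World!")                      # -> ['Hello', ',', 'World', '!']
+    #   greet.parseString("Hello, World! etc.", parseAll=True)  # raises ParseException
+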
+    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+        """Scan the input string for expression matches.  Each match will return the
+           matching tokens, start location, and end location.  May be called with optional
+           C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
+           C{overlap} is specified, then overlapping matches will be reported.
+
+           Note that the start and end locations are reported relative to the string
+           being parsed.  See L{I{parseString}<parseString>} for more information on parsing
+           strings with embedded tabs."""
+        if not self.streamlined:
+            self.streamline()
+        for e in self.ignoreExprs:
+            e.streamline()
+
+        if not self.keepTabs:
+            instring = _ustr(instring).expandtabs()
+        instrlen = len(instring)
+        loc = 0
+        preparseFn = self.preParse
+        parseFn = self._parse
+        ParserElement.resetCache()
+        matches = 0
+        try:
+            while loc <= instrlen and matches < maxMatches:
+                try:
+                    preloc = preparseFn( instring, loc )
+                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+                except ParseException:
+                    loc = preloc+1
+                else:
+                    if nextLoc > loc:
+                        matches += 1
+                        yield tokens, preloc, nextLoc
+                        if overlap:
+                            nextloc = preparseFn( instring, loc )
+                            if nextloc > loc:
+                                loc = nextLoc
+                            else:
+                                loc += 1
+                        else:
+                            loc = nextLoc
+                    else:
+                        loc = preloc+1
+        except ParseBaseException:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                exc = sys.exc_info()[1]
+                raise exc
+
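+    # Editor's illustrative sketch, not part of the original pyparsing source:
+    # scanString is a generator of (tokens, start, end) triples, one per match
+    # found in the input.
+    #
+    #   [(t[0], s, e) for t, s, e in Word(nums).scanString("a1 bb22 ccc333")]
+    #   # -> [('1', 1, 2), ('22', 5, 7), ('333', 11, 14)]
+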
+    def transformString( self, instring ):
+        """Extension to C{scanString}, to modify matching text with modified tokens that may
+           be returned from a parse action.  To use C{transformString}, define a grammar and
+           attach a parse action to it that modifies the returned token list.
+           Invoking C{transformString()} on a target string will then scan for matches,
+           and replace the matched text patterns according to the logic in the parse
+           action.  C{transformString()} returns the resulting transformed string."""
+        out = []
+        lastE = 0
+        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+        # keep string locs straight between transformString and scanString
+        self.keepTabs = True
+        try:
+            for t,s,e in self.scanString( instring ):
+                out.append( instring[lastE:s] )
+                if t:
+                    if isinstance(t,ParseResults):
+                        out += t.asList()
+                    elif isinstance(t,list):
+                        out += t
+                    else:
+                        out.append(t)
+                lastE = e
+            out.append(instring[lastE:])
+            out = [o for o in out if o]
+            return "".join(map(_ustr,_flatten(out)))
+        except ParseBaseException:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                exc = sys.exc_info()[1]
+                raise exc
+
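+    # Editor's illustrative sketch, not part of the original pyparsing source:
+    # attach a parse action that rewrites the matched tokens, and
+    # transformString splices the replacements back into the surrounding text.
+    #
+    #   wd = Word(alphas)
+    #   wd.setParseAction(lambda toks: [toks[0].upper()])
+    #   wd.transformString("now is the winter")   # -> 'NOW IS THE WINTER'
+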
+    def searchString( self, instring, maxMatches=_MAX_INT ):
+        """Another extension to C{scanString}, simplifying the access to the tokens found
+           to match the given parse expression.  May be called with optional
+           C{maxMatches} argument, to clip searching after 'n' matches are found.
+        """
+        try:
+            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+        except ParseBaseException:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                exc = sys.exc_info()[1]
+                raise exc
+
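+    # Editor's illustrative sketch, not part of the original pyparsing source:
+    # searchString gathers each match's tokens into a single ParseResults.
+    #
+    #   Word(nums).searchString("area 51, route 66")   # -> [['51'], ['66']]
+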
+    def __add__(self, other ):
+        """Implementation of + operator - returns And"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, other ] )
+
+    def __radd__(self, other ):
+        """Implementation of + operator when left operand is not a C{ParserElement}"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other + self
+
+    def __sub__(self, other):
+        """Implementation of - operator, returns C{And} with error stop"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, And._ErrorStop(), other ] )
+
+    def __rsub__(self, other ):
+        """Implementation of - operator when left operand is not a C{ParserElement}"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other - self
+
+    def __mul__(self,other):
+        """Implementation of * operator, allows use of C{expr * 3} in place of
+           C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
+           tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
+           may also include C{None} as in:
+            - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+              to C{expr*n + ZeroOrMore(expr)}
+              (read as "at least n instances of C{expr}")
+            - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+              (read as "0 to n instances of C{expr}")
+            - C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)}
+            - C{expr*(1,None)} is equivalent to C{OneOrMore(expr)}
+
+           Note that C{expr*(None,n)} does not raise an exception if
+           more than n exprs exist in the input stream; that is,
+           C{expr*(None,n)} does not enforce a maximum number of expr
+           occurrences.  If this behavior is desired, then write
+           C{expr*(None,n) + ~expr}
+
+        """
+        if isinstance(other,int):
+            minElements, optElements = other,0
+        elif isinstance(other,tuple):
+            other = (other + (None, None))[:2]
+            if other[0] is None:
+                other = (0, other[1])
+            if isinstance(other[0],int) and other[1] is None:
+                if other[0] == 0:
+                    return ZeroOrMore(self)
+                if other[0] == 1:
+                    return OneOrMore(self)
+                else:
+                    return self*other[0] + ZeroOrMore(self)
+            elif isinstance(other[0],int) and isinstance(other[1],int):
+                minElements, optElements = other
+                optElements -= minElements
+            else:
+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
+        else:
+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
+
+        if minElements < 0:
+            raise ValueError("cannot multiply ParserElement by negative value")
+        if optElements < 0:
+            raise ValueError("second tuple value must be greater or equal to first tuple value")
+        if minElements == optElements == 0:
+            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+        if (optElements):
+            def makeOptionalList(n):
+                if n>1:
+                    return Optional(self + makeOptionalList(n-1))
+                else:
+                    return Optional(self)
+            if minElements:
+                if minElements == 1:
+                    ret = self + makeOptionalList(optElements)
+                else:
+                    ret = And([self]*minElements) + makeOptionalList(optElements)
+            else:
+                ret = makeOptionalList(optElements)
+        else:
+            if minElements == 1:
+                ret = self
+            else:
+                ret = And([self]*minElements)
+        return ret
+
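+    # Editor's illustrative sketch, not part of the original pyparsing source,
+    # showing the multiplication forms described above (Word/nums are helpers
+    # defined elsewhere in this module):
+    #
+    #   four_nums   = Word(nums) * 4          # exactly four occurrences
+    #   two_or_more = Word(nums) * (2, None)  # Word(nums)*2 + ZeroOrMore(Word(nums))
+    #   up_to_three = Word(nums) * (None, 3)  # zero to three occurrences
+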
+    def __rmul__(self, other):
+        return self.__mul__(other)
+
+    def __or__(self, other ):
+        """Implementation of | operator - returns C{MatchFirst}"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return MatchFirst( [ self, other ] )
+
+    def __ror__(self, other ):
+        """Implementation of | operator when left operand is not a C{ParserElement}"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other | self
+
+    def __xor__(self, other ):
+        """Implementation of ^ operator - returns C{Or}"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Or( [ self, other ] )
+
+    def __rxor__(self, other ):
+        """Implementation of ^ operator when left operand is not a C{ParserElement}"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other ^ self
+
+    def __and__(self, other ):
+        """Implementation of & operator - returns C{Each}"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Each( [ self, other ] )
+
+    def __rand__(self, other ):
+        """Implementation of & operator when left operand is not a C{ParserElement}"""
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other & self
+
+    def __invert__( self ):
+        """Implementation of ~ operator - returns C{NotAny}"""
+        return NotAny( self )
+
+    def __call__(self, name):
+        """Shortcut for C{setResultsName}, with C{listAllMatches=default}::
+             userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+           could be written as::
+             userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
+
+           If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
+           passed as C{True}.
+           """
+        return self.setResultsName(name)
+
+    def suppress( self ):
+        """Suppresses the output of this C{ParserElement}; useful to keep punctuation from
+           cluttering up returned output.
+        """
+        return Suppress( self )
+
+    def leaveWhitespace( self ):
+        """Disables the skipping of whitespace before matching the characters in the
+           C{ParserElement}'s defined pattern.  This is normally only used internally by
+           the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+        """
+        self.skipWhitespace = False
+        return self
+
+    def setWhitespaceChars( self, chars ):
+        """Overrides the default whitespace chars
+        """
+        self.skipWhitespace = True
+        self.whiteChars = chars
+        self.copyDefaultWhiteChars = False
+        return self
+
+    def parseWithTabs( self ):
+        """Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
+           Must be called before C{parseString} when the input grammar contains elements that
+           match C{<TAB>} characters."""
+        self.keepTabs = True
+        return self
+
+    def ignore( self, other ):
+        """Define expression to be ignored (e.g., comments) while doing pattern
+           matching; may be called repeatedly, to define multiple comment or other
+           ignorable patterns.
+        """
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                self.ignoreExprs.append( other.copy() )
+        else:
+            self.ignoreExprs.append( Suppress( other.copy() ) )
+        return self
+
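+    # Editor's illustrative sketch, not part of the original pyparsing source;
+    # it assumes the cStyleComment expression defined elsewhere in this module.
+    #
+    #   idents = OneOrMore(Word(alphas))
+    #   idents.ignore(cStyleComment)
+    #   idents.parseString("abc /* skip me */ def")   # -> ['abc', 'def']
+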
+    def setDebugActions( self, startAction, successAction, exceptionAction ):
+        """Enable display of debugging messages while doing pattern matching."""
+        self.debugActions = (startAction or _defaultStartDebugAction,
+                             successAction or _defaultSuccessDebugAction,
+                             exceptionAction or _defaultExceptionDebugAction)
+        self.debug = True
+        return self
+
+    def setDebug( self, flag=True ):
+        """Enable display of debugging messages while doing pattern matching.
+           Set C{flag} to True to enable, False to disable."""
+        if flag:
+            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
+        else:
+            self.debug = False
+        return self
+
+    def __str__( self ):
+        return self.name
+
+    def __repr__( self ):
+        return _ustr(self)
+
+    def streamline( self ):
+        self.streamlined = True
+        self.strRepr = None
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        pass
+
+    def validate( self, validateTrace=[] ):
+        """Check defined expressions for valid structure, check for infinite recursive definitions."""
+        self.checkRecursion( [] )
+
+    def parseFile( self, file_or_filename, parseAll=False ):
+        """Execute the parse expression on the given file or filename.
+           If a filename is specified (instead of a file object),
+           the entire file is opened, read, and closed before parsing.
+        """
+        try:
+            file_contents = file_or_filename.read()
+        except AttributeError:
+            f = open(file_or_filename, "rb")
+            file_contents = f.read()
+            f.close()
+        try:
+            return self.parseString(file_contents, parseAll)
+        except ParseBaseException:
+            # catch and re-raise exception from here, clears out pyparsing internal stack trace
+            exc = sys.exc_info()[1]
+            raise exc
+
+    def getException(self):
+        return ParseException("",0,self.errmsg,self)
+
+    def __getattr__(self,aname):
+        if aname == "myException":
+            self.myException = ret = self.getException()
+            return ret
+        else:
+            raise AttributeError("no such attribute " + aname)
+
+    def __eq__(self,other):
+        if isinstance(other, ParserElement):
+            return self is other or self.__dict__ == other.__dict__
+        elif isinstance(other, basestring):
+            try:
+                self.parseString(_ustr(other), parseAll=True)
+                return True
+            except ParseBaseException:
+                return False
+        else:
+            return super(ParserElement,self)==other
+
+    def __ne__(self,other):
+        return not (self == other)
+
+    def __hash__(self):
+        return hash(id(self))
+
+    def __req__(self,other):
+        return self == other
+
+    def __rne__(self,other):
+        return not (self == other)
+
+
+class Token(ParserElement):
+    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
+    def __init__( self ):
+        super(Token,self).__init__( savelist=False )
+
+    def setName(self, name):
+        s = super(Token,self).setName(name)
+        self.errmsg = "Expected " + self.name
+        return s
+
+
+class Empty(Token):
+    """An empty token, will always match."""
+    def __init__( self ):
+        super(Empty,self).__init__()
+        self.name = "Empty"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+
+class NoMatch(Token):
+    """A token that will never match."""
+    def __init__( self ):
+        super(NoMatch,self).__init__()
+        self.name = "NoMatch"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.errmsg = "Unmatchable token"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        exc = self.myException
+        exc.loc = loc
+        exc.pstr = instring
+        raise exc
+
+
+class Literal(Token):
+    """Token to exactly match a specified string."""
+    def __init__( self, matchString ):
+        super(Literal,self).__init__()
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Literal; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+            self.__class__ = Empty
+        self.name = '"%s"' % _ustr(self.match)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+
+    # Performance tuning: this routine gets called a *lot*
+    # if this is a single character match string  and the first character matches,
+    # short-circuit as quickly as possible, and avoid calling startswith
+    #~ @profile
+    def parseImpl( self, instring, loc, doActions=True ):
+        if (instring[loc] == self.firstMatchChar and
+            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
+            return loc+self.matchLen, self.match
+        #~ raise ParseException( instring, loc, self.errmsg )
+        exc = self.myException
+        exc.loc = loc
+        exc.pstr = instring
+        raise exc
+_L = Literal
+
+class Keyword(Token):
+    """Token to exactly match a specified string as a keyword, that is, it must be
+       immediately followed by a non-keyword character.  Compare with C{Literal}::
+         Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.
+         Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
+       Accepts two optional constructor arguments in addition to the keyword string:
+       C{identChars} is a string of characters that would be valid identifier characters,
+       defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
+       matching, default is C{False}.
+    """
+    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
+
+    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
+        super(Keyword,self).__init__()
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Keyword; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+        self.name = '"%s"' % self.match
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+        self.caseless = caseless
+        if caseless:
+            self.caselessmatch = matchString.upper()
+            identChars = identChars.upper()
+        self.identChars = set(identChars)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.caseless:
+            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
+                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        else:
+            if (instring[loc] == self.firstMatchChar and
+                (self.matchLen==1 or instring.startswith(self.match,loc)) and
+                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
+                (loc == 0 or instring[loc-1] not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        #~ raise ParseException( instring, loc, self.errmsg )
+        exc = self.myException
+        exc.loc = loc
+        exc.pstr = instring
+        raise exc
+
+    def copy(self):
+        c = super(Keyword,self).copy()
+        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        return c
+
+    def setDefaultKeywordChars( chars ):
+        """Overrides the default Keyword chars
+        """
+        Keyword.DEFAULT_KEYWORD_CHARS = chars
+    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
+
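+# Editor's illustrative sketch, not part of the original pyparsing source,
+# showing the Literal/Keyword distinction described above:
+#
+#   Literal("if").parseString("ifAndOnlyIf")   # -> ['if']  (matches the leading 'if')
+#   Keyword("if").parseString("if(y==2)")      # -> ['if']  ('(' is not a keyword character)
+#   Keyword("if").parseString("ifAndOnlyIf")   # raises ParseException
+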
+class CaselessLiteral(Literal):
+    """Token to match a specified string, ignoring case of letters.
+       Note: the matched results will always be in the case of the given
+       match string, NOT the case of the input text.
+    """
+    def __init__( self, matchString ):
+        super(CaselessLiteral,self).__init__( matchString.upper() )
+        # Preserve the defining literal.
+        self.returnString = matchString
+        self.name = "'%s'" % self.returnString
+        self.errmsg = "Expected " + self.name
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[ loc:loc+self.matchLen ].upper() == self.match:
+            return loc+self.matchLen, self.returnString
+        #~ raise ParseException( instring, loc, self.errmsg )
+        exc = self.myException
+        exc.loc = loc
+        exc.pstr = instring
+        raise exc
+
+class CaselessKeyword(Keyword):
+    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
+        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
+            return loc+self.matchLen, self.match
+        #~ raise ParseException( instring, loc, self.errmsg )
+        exc = self.myException
+        exc.loc = loc
+        exc.pstr = instring
+        raise exc
+
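+# Editor's illustrative sketch, not part of the original pyparsing source:
+# caseless matching returns the defining string regardless of the input's case.
+#
+#   CaselessLiteral("Cmd").parseString("CMD value")   # -> ['Cmd']
+#   CaselessKeyword("end").parseString("END")         # -> ['end']
+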
+class Word(Token):
+    """Token for matching words composed of allowed character sets.
+       Defined with string containing all allowed initial characters,
+       an optional string containing allowed body characters (if omitted,
+       defaults to the initial character set), and an optional minimum,
+       maximum, and/or exact length.  The default value for C{min} is 1 (a
+       minimum value < 1 is not valid); the default values for C{max} and C{exact}
+       are 0, meaning no maximum or exact length restriction. An optional
+       C{excludeChars} parameter can list characters to be removed from the allowed
+       initial and body character sets; useful to define a word of all printables
+       except for one or two characters, for instance.
+    """
+    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
+        super(Word,self).__init__()
+        if excludeChars:
+            initChars = ''.join([c for c in initChars if c not in excludeChars])
+            if bodyChars:
+                bodyChars = ''.join([c for c in bodyChars if c not in excludeChars])
+        self.initCharsOrig = initChars
+        self.initChars = set(initChars)
+        if bodyChars :
+            self.bodyCharsOrig = bodyChars
+            self.bodyChars = set(bodyChars)
+        else:
+            self.bodyCharsOrig = initChars
+            self.bodyChars = set(initChars)
+
+        self.maxSpecified = max > 0
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.asKeyword = asKeyword
+
+        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
+            if self.bodyCharsOrig == self.initCharsOrig:
+                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+            elif len(self.bodyCharsOrig) == 1:
+                self.reString = "%s[%s]*" % \
+                                      (re.escape(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            else:
+                self.reString = "[%s][%s]*" % \
+                                      (_escapeRegexRangeChars(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            if self.asKeyword:
+                self.reString = r"\b"+self.reString+r"\b"
+            try:
+                self.re = re.compile( self.reString )
+            except:
+                self.re = None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.re:
+            result = self.re.match(instring,loc)
+            if not result:
+                exc = self.myException
+                exc.loc = loc
+                exc.pstr = instring
+                raise exc
+
+            loc = result.end()
+            return loc, result.group()
+
+        if not(instring[ loc ] in self.initChars):
+            #~ raise ParseException( instring, loc, self.errmsg )
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+        start = loc
+        loc += 1
+        instrlen = len(instring)
+        bodychars = self.bodyChars
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, instrlen )
+        while loc < maxloc and instring[loc] in bodychars:
+            loc += 1
+
+        throwException = False
+        if loc - start < self.minLen:
+            throwException = True
+        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+            throwException = True
+        if self.asKeyword:
+            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
+                throwException = True
+
+        if throwException:
+            #~ raise ParseException( instring, loc, self.errmsg )
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(Word,self).__str__()
+        except:
+            pass
+
+
+        if self.strRepr is None:
+
+            def charsAsStr(s):
+                if len(s)>4:
+                    return s[:4]+"..."
+                else:
+                    return s
+
+            if ( self.initCharsOrig != self.bodyCharsOrig ):
+                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
+            else:
+                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+        return self.strRepr
+
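+# Editor's illustrative sketch, not part of the original pyparsing source
+# (alphas/alphanums/nums are the character strings defined elsewhere in this
+# module):
+#
+#   identifier = Word(alphas + "_", alphanums + "_")
+#   identifier.parseString("_foo42 = 1")          # -> ['_foo42']
+#   Word(nums, exact=4).parseString("20130704")   # -> ['2013']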
+
+class Regex(Token):
+    """Token for matching strings that match a given regular expression.
+       Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
+    """
+    compiledREtype = type(re.compile("[A-Z]"))
+    def __init__( self, pattern, flags=0):
+        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
+        super(Regex,self).__init__()
+
+        if isinstance(pattern, basestring):
+            if len(pattern) == 0:
+                warnings.warn("null string passed to Regex; use Empty() instead",
+                        SyntaxWarning, stacklevel=2)
+
+            self.pattern = pattern
+            self.flags = flags
+
+            try:
+                self.re = re.compile(self.pattern, self.flags)
+                self.reString = self.pattern
+            except sre_constants.error:
+                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+                    SyntaxWarning, stacklevel=2)
+                raise
+
+        elif isinstance(pattern, Regex.compiledREtype):
+            self.re = pattern
+            self.pattern = \
+            self.reString = str(pattern)
+            self.flags = flags
+
+        else:
+            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = self.re.match(instring,loc)
+        if not result:
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+
+        loc = result.end()
+        d = result.groupdict()
+        ret = ParseResults(result.group())
+        if d:
+            for k in d:
+                ret[k] = d[k]
+        return loc,ret
+
+    def __str__( self ):
+        try:
+            return super(Regex,self).__str__()
+        except:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+        return self.strRepr
+
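+# Editor's illustrative sketch, not part of the original pyparsing source:
+# named groups in the pattern become named results on the returned ParseResults.
+#
+#   date_re = Regex(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})")
+#   result = date_re.parseString("2013-07-04")
+#   result[0]        # -> '2013-07-04'
+#   result["year"]   # -> '2013'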
+
+class QuotedString(Token):
+    """Token for matching strings that are delimited by quoting characters.
+    """
+    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
+        """
+           Defined with the following parameters:
+            - quoteChar - string of one or more characters defining the quote delimiting string
+            - escChar - character to escape quotes, typically backslash (default=None)
+            - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
+            - multiline - boolean indicating whether quotes can span multiple lines (default=False)
+            - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
+            - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
+        """
+        super(QuotedString,self).__init__()
+
+        # remove white space from quote chars - won't work anyway
+        quoteChar = quoteChar.strip()
+        if len(quoteChar) == 0:
+            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+            raise SyntaxError()
+
+        if endQuoteChar is None:
+            endQuoteChar = quoteChar
+        else:
+            endQuoteChar = endQuoteChar.strip()
+            if len(endQuoteChar) == 0:
+                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+                raise SyntaxError()
+
+        self.quoteChar = quoteChar
+        self.quoteCharLen = len(quoteChar)
+        self.firstQuoteChar = quoteChar[0]
+        self.endQuoteChar = endQuoteChar
+        self.endQuoteCharLen = len(endQuoteChar)
+        self.escChar = escChar
+        self.escQuote = escQuote
+        self.unquoteResults = unquoteResults
+
+        if multiline:
+            self.flags = re.MULTILINE | re.DOTALL
+            self.pattern = r'%s(?:[^%s%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        else:
+            self.flags = 0
+            self.pattern = r'%s(?:[^%s\n\r%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        if len(self.endQuoteChar) > 1:
+            self.pattern += (
+                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
+                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
+                )
+        if escQuote:
+            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+        if escChar:
+            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+            charset = ''.join(set(self.quoteChar[0]+self.endQuoteChar[0])).replace('^',r'\^').replace('-',r'\-')
+            self.escCharReplacePattern = re.escape(self.escChar)+("([%s])" % charset)
+        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+        try:
+            self.re = re.compile(self.pattern, self.flags)
+            self.reString = self.pattern
+        except sre_constants.error:
+            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+                SyntaxWarning, stacklevel=2)
+            raise
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
+        if not result:
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+
+        loc = result.end()
+        ret = result.group()
+
+        if self.unquoteResults:
+
+            # strip off quotes
+            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
+
+            if isinstance(ret,basestring):
+                # replace escaped characters
+                if self.escChar:
+                    ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
+
+                # replace escaped quotes
+                if self.escQuote:
+                    ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+        return loc, ret
+
+    def __str__( self ):
+        try:
+            return super(QuotedString,self).__str__()
+        except:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+        return self.strRepr
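+
+# Illustrative QuotedString usage - results are indicative only, and assume the
+# rest of this module's standard pyparsing API:
+#   sqlString = QuotedString("'", escQuote="''")
+#   sqlString.parseString("'He''s here'")      # -> ["He's here"]
+#   rawString = QuotedString('"', unquoteResults=False)
+#   rawString.parseString('"abc"')             # -> ['"abc"']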
+
+
+class CharsNotIn(Token):
+    """Token for matching words composed of characters *not* in a given set.
+       Defined with a string containing all disallowed characters, and an optional
+       minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
+       minimum value < 1 is not valid); the default values for C{max} and C{exact}
+       are 0, meaning no maximum or exact length restriction.
+    """
+    def __init__( self, notChars, min=1, max=0, exact=0 ):
+        super(CharsNotIn,self).__init__()
+        self.skipWhitespace = False
+        self.notChars = notChars
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = ( self.minLen == 0 )
+        self.mayIndexError = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[loc] in self.notChars:
+            #~ raise ParseException( instring, loc, self.errmsg )
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+
+        start = loc
+        loc += 1
+        notchars = self.notChars
+        maxlen = min( start+self.maxLen, len(instring) )
+        while loc < maxlen and \
+              (instring[loc] not in notchars):
+            loc += 1
+
+        if loc - start < self.minLen:
+            #~ raise ParseException( instring, loc, self.errmsg )
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(CharsNotIn, self).__str__()
+        except:
+            pass
+
+        if self.strRepr is None:
+            if len(self.notChars) > 4:
+                self.strRepr = "!W:(%s...)" % self.notChars[:4]
+            else:
+                self.strRepr = "!W:(%s)" % self.notChars
+
+        return self.strRepr
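+
+# Illustrative CharsNotIn usage - indicative only:
+#   csvField = CharsNotIn(",\n")
+#   csvField.parseString("abc,def")            # -> ['abc'] (stops at the ',')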
+
+class White(Token):
+    """Special matching class for matching whitespace.  Normally, whitespace is ignored
+       by pyparsing grammars.  This class is included when some whitespace structures
+       are significant.  Define with a string containing the whitespace characters to be
+       matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
+       as defined for the C{Word} class."""
+    whiteStrs = {
+        " " : "<SPC>",
+        "\t": "<TAB>",
+        "\n": "<LF>",
+        "\r": "<CR>",
+        "\f": "<FF>",
+        }
+    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+        super(White,self).__init__()
+        self.matchWhite = ws
+        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
+        #~ self.leaveWhitespace()
+        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
+        self.mayReturnEmpty = True
+        self.errmsg = "Expected " + self.name
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if not(instring[ loc ] in self.matchWhite):
+            #~ raise ParseException( instring, loc, self.errmsg )
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+        start = loc
+        loc += 1
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, len(instring) )
+        while loc < maxloc and instring[loc] in self.matchWhite:
+            loc += 1
+
+        if loc - start < self.minLen:
+            #~ raise ParseException( instring, loc, self.errmsg )
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+
+        return loc, instring[start:loc]
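+
+# Illustrative White usage - match significant indentation; indicative only and
+# assumes Word/alphas defined elsewhere in this module:
+#   indent = White(" ", exact=4)
+#   line = indent + Word(alphas)
+#   line.parseString("    body")               # -> ['    ', 'body']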
+
+
+class _PositionToken(Token):
+    def __init__( self ):
+        super(_PositionToken,self).__init__()
+        self.name=self.__class__.__name__
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+    """Token to advance to a specific column of input text; useful for tabular report scraping."""
+    def __init__( self, colno ):
+        super(GoToColumn,self).__init__()
+        self.col = colno
+
+    def preParse( self, instring, loc ):
+        if col(loc,instring) != self.col:
+            instrlen = len(instring)
+            if self.ignoreExprs:
+                loc = self._skipIgnorables( instring, loc )
+            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
+                loc += 1
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        thiscol = col( loc, instring )
+        if thiscol > self.col:
+            raise ParseException( instring, loc, "Text not in expected column", self )
+        newloc = loc + self.col - thiscol
+        ret = instring[ loc: newloc ]
+        return newloc, ret
+
+class LineStart(_PositionToken):
+    """Matches if current position is at the beginning of a line within the parse string"""
+    def __init__( self ):
+        super(LineStart,self).__init__()
+        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+        self.errmsg = "Expected start of line"
+
+    def preParse( self, instring, loc ):
+        preloc = super(LineStart,self).preParse(instring,loc)
+        if instring[preloc] == "\n":
+            loc += 1
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if not( loc==0 or
+            (loc == self.preParse( instring, 0 )) or
+            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
+            #~ raise ParseException( instring, loc, "Expected start of line" )
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+        return loc, []
+
+class LineEnd(_PositionToken):
+    """Matches if current position is at the end of a line within the parse string"""
+    def __init__( self ):
+        super(LineEnd,self).__init__()
+        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+        self.errmsg = "Expected end of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc<len(instring):
+            if instring[loc] == "\n":
+                return loc+1, "\n"
+            else:
+                #~ raise ParseException( instring, loc, "Expected end of line" )
+                exc = self.myException
+                exc.loc = loc
+                exc.pstr = instring
+                raise exc
+        elif loc == len(instring):
+            return loc+1, []
+        else:
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
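+
+# Illustrative LineEnd usage - indicative only; assumes Word/alphas defined
+# elsewhere in this module:
+#   eolTerminated = Word(alphas) + LineEnd().suppress()
+#   eolTerminated.parseString("abc\n")         # -> ['abc']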
+
+class StringStart(_PositionToken):
+    """Matches if current position is at the beginning of the parse string"""
+    def __init__( self ):
+        super(StringStart,self).__init__()
+        self.errmsg = "Expected start of text"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc != 0:
+            # see if entire string up to here is just whitespace and ignoreables
+            if loc != self.preParse( instring, 0 ):
+                #~ raise ParseException( instring, loc, "Expected start of text" )
+                exc = self.myException
+                exc.loc = loc
+                exc.pstr = instring
+                raise exc
+        return loc, []
+
+class StringEnd(_PositionToken):
+    """Matches if current position is at the end of the parse string"""
+    def __init__( self ):
+        super(StringEnd,self).__init__()
+        self.errmsg = "Expected end of text"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc < len(instring):
+            #~ raise ParseException( instring, loc, "Expected end of text" )
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+        elif loc == len(instring):
+            return loc+1, []
+        elif loc > len(instring):
+            return loc, []
+        else:
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+
+class WordStart(_PositionToken):
+    """Matches if the current position is at the beginning of a Word, and
+       is not preceded by any character in a given set of C{wordChars}
+       (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+       use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
+       the string being parsed, or at the beginning of a line.
+    """
+    def __init__(self, wordChars = printables):
+        super(WordStart,self).__init__()
+        self.wordChars = set(wordChars)
+        self.errmsg = "Not at the start of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        if loc != 0:
+            if (instring[loc-1] in self.wordChars or
+                instring[loc] not in self.wordChars):
+                exc = self.myException
+                exc.loc = loc
+                exc.pstr = instring
+                raise exc
+        return loc, []
+
+class WordEnd(_PositionToken):
+    """Matches if the current position is at the end of a Word, and
+       is not followed by any character in a given set of C{wordChars}
+       (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+       use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
+       the string being parsed, or at the end of a line.
+    """
+    def __init__(self, wordChars = printables):
+        super(WordEnd,self).__init__()
+        self.wordChars = set(wordChars)
+        self.skipWhitespace = False
+        self.errmsg = "Not at the end of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        instrlen = len(instring)
+        if instrlen>0 and loc<instrlen:
+            if (instring[loc] in self.wordChars or
+                instring[loc-1] not in self.wordChars):
+                #~ raise ParseException( instring, loc, "Expected end of word" )
+                exc = self.myException
+                exc.loc = loc
+                exc.pstr = instring
+                raise exc
+        return loc, []
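+
+# Illustrative WordStart/WordEnd usage - approximate the regex \b anchor;
+# indicative only and assumes Literal/alphanums defined elsewhere in this module:
+#   ifKeyword = WordStart(alphanums) + Literal("if") + WordEnd(alphanums)
+#   ifKeyword.searchString("if iffy")          # matches only the standalone "if"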
+
+
+class ParseExpression(ParserElement):
+    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
+    def __init__( self, exprs, savelist = False ):
+        super(ParseExpression,self).__init__(savelist)
+        if isinstance( exprs, list ):
+            self.exprs = exprs
+        elif isinstance( exprs, basestring ):
+            self.exprs = [ Literal( exprs ) ]
+        else:
+            try:
+                self.exprs = list( exprs )
+            except TypeError:
+                self.exprs = [ exprs ]
+        self.callPreparse = False
+
+    def __getitem__( self, i ):
+        return self.exprs[i]
+
+    def append( self, other ):
+        self.exprs.append( other )
+        self.strRepr = None
+        return self
+
+    def leaveWhitespace( self ):
+        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
+           all contained expressions."""
+        self.skipWhitespace = False
+        self.exprs = [ e.copy() for e in self.exprs ]
+        for e in self.exprs:
+            e.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseExpression, self).ignore( other )
+                for e in self.exprs:
+                    e.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseExpression, self).ignore( other )
+            for e in self.exprs:
+                e.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def __str__( self ):
+        try:
+            return super(ParseExpression,self).__str__()
+        except:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
+        return self.strRepr
+
+    def streamline( self ):
+        super(ParseExpression,self).streamline()
+
+        for e in self.exprs:
+            e.streamline()
+
+        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
+        # but only if there are no parse actions or resultsNames on the nested And's
+        # (likewise for Or's and MatchFirst's)
+        if ( len(self.exprs) == 2 ):
+            other = self.exprs[0]
+            if ( isinstance( other, self.__class__ ) and
+                  not(other.parseAction) and
+                  other.resultsName is None and
+                  not other.debug ):
+                self.exprs = other.exprs[:] + [ self.exprs[1] ]
+                self.strRepr = None
+                self.mayReturnEmpty |= other.mayReturnEmpty
+                self.mayIndexError  |= other.mayIndexError
+
+            other = self.exprs[-1]
+            if ( isinstance( other, self.__class__ ) and
+                  not(other.parseAction) and
+                  other.resultsName is None and
+                  not other.debug ):
+                self.exprs = self.exprs[:-1] + other.exprs[:]
+                self.strRepr = None
+                self.mayReturnEmpty |= other.mayReturnEmpty
+                self.mayIndexError  |= other.mayIndexError
+
+        return self
+
+    def setResultsName( self, name, listAllMatches=False ):
+        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
+        return ret
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        for e in self.exprs:
+            e.validate(tmp)
+        self.checkRecursion( [] )
+
+    def copy(self):
+        ret = super(ParseExpression,self).copy()
+        ret.exprs = [e.copy() for e in self.exprs]
+        return ret
+
+class And(ParseExpression):
+    """Requires all given C{ParseExpression}s to be found in the given order.
+       Expressions may be separated by whitespace.
+       May be constructed using the C{'+'} operator.
+    """
+
+    class _ErrorStop(Empty):
+        def __init__(self, *args, **kwargs):
+            super(Empty,self).__init__(*args, **kwargs)
+            self.leaveWhitespace()
+
+    def __init__( self, exprs, savelist = True ):
+        super(And,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = True
+        for e in self.exprs:
+            if not e.mayReturnEmpty:
+                self.mayReturnEmpty = False
+                break
+        self.setWhitespaceChars( exprs[0].whiteChars )
+        self.skipWhitespace = exprs[0].skipWhitespace
+        self.callPreparse = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        # pass False as last arg to _parse for first element, since we already
+        # pre-parsed the string as part of our And pre-parsing
+        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
+        errorStop = False
+        for e in self.exprs[1:]:
+            if isinstance(e, And._ErrorStop):
+                errorStop = True
+                continue
+            if errorStop:
+                try:
+                    loc, exprtokens = e._parse( instring, loc, doActions )
+                except ParseSyntaxException:
+                    raise
+                except ParseBaseException:
+                    pe = sys.exc_info()[1]
+                    raise ParseSyntaxException(pe)
+                except IndexError:
+                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
+            else:
+                loc, exprtokens = e._parse( instring, loc, doActions )
+            if exprtokens or exprtokens.keys():
+                resultlist += exprtokens
+        return loc, resultlist
+
+    def __iadd__(self, other ):
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        return self.append( other ) #And( [ self, other ] )
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+            if not e.mayReturnEmpty:
+                break
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
+
+        return self.strRepr
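+
+# Illustrative And usage - normally built with the '+' operator; indicative only
+# and assumes Word/alphas/nums defined elsewhere in this module:
+#   assignment = Word(alphas) + "=" + Word(nums)
+#   assignment.parseString("x = 10")           # -> ['x', '=', '10']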
+
+
+class Or(ParseExpression):
+    """Requires that at least one C{ParseExpression} is found.
+       If two expressions match, the expression that matches the longest string will be used.
+       May be constructed using the C{'^'} operator.
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(Or,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = False
+        for e in self.exprs:
+            if e.mayReturnEmpty:
+                self.mayReturnEmpty = True
+                break
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxMatchLoc = -1
+        maxException = None
+        for e in self.exprs:
+            try:
+                loc2 = e.tryParse( instring, loc )
+            except ParseException:
+                err = sys.exc_info()[1]
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+            else:
+                if loc2 > maxMatchLoc:
+                    maxMatchLoc = loc2
+                    maxMatchExp = e
+
+        if maxMatchLoc < 0:
+            if maxException is not None:
+                raise maxException
+            else:
+                raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+        return maxMatchExp._parse( instring, loc, doActions )
+
+    def __ixor__(self, other ):
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        return self.append( other ) #Or( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class MatchFirst(ParseExpression):
+    """Requires that at least one C{ParseExpression} is found.
+       If two expressions match, the first one listed is the one that will match.
+       May be constructed using the C{'|'} operator.
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(MatchFirst,self).__init__(exprs, savelist)
+        if exprs:
+            self.mayReturnEmpty = False
+            for e in self.exprs:
+                if e.mayReturnEmpty:
+                    self.mayReturnEmpty = True
+                    break
+        else:
+            self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        for e in self.exprs:
+            try:
+                ret = e._parse( instring, loc, doActions )
+                return ret
+            except ParseException:
+                err = sys.exc_info()[1]
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+
+        # only got here if no expression matched, raise exception for match that made it the furthest
+        else:
+            if maxException is not None:
+                raise maxException
+            else:
+                raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+    def __ior__(self, other ):
+        if isinstance( other, basestring ):
+            other = Literal( other )
+        return self.append( other ) #MatchFirst( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
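+
+# Illustrative Or vs. MatchFirst usage - '^' takes the longest match, '|' takes
+# the first listed; indicative only and assumes Word/nums defined elsewhere:
+#   number = Word(nums)
+#   real = Word(nums) + "." + Word(nums)
+#   (real ^ number).parseString("3.1416")      # -> ['3', '.', '1416']
+#   (number | real).parseString("3.1416")      # -> ['3']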
+
+
+class Each(ParseExpression):
+    """Requires all given C{ParseExpression}s to be found, but in any order.
+       Expressions may be separated by whitespace.
+       May be constructed using the C{'&'} operator.
+    """
+    def __init__( self, exprs, savelist = True ):
+        super(Each,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = True
+        for e in self.exprs:
+            if not e.mayReturnEmpty:
+                self.mayReturnEmpty = False
+                break
+        self.skipWhitespace = True
+        self.initExprGroups = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.initExprGroups:
+            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
+            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
+            self.optionals = opt1 + opt2
+            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
+            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
+            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
+            self.required += self.multirequired
+            self.initExprGroups = False
+        tmpLoc = loc
+        tmpReqd = self.required[:]
+        tmpOpt  = self.optionals[:]
+        matchOrder = []
+
+        keepMatching = True
+        while keepMatching:
+            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+            failed = []
+            for e in tmpExprs:
+                try:
+                    tmpLoc = e.tryParse( instring, tmpLoc )
+                except ParseException:
+                    failed.append(e)
+                else:
+                    matchOrder.append(e)
+                    if e in tmpReqd:
+                        tmpReqd.remove(e)
+                    elif e in tmpOpt:
+                        tmpOpt.remove(e)
+            if len(failed) == len(tmpExprs):
+                keepMatching = False
+
+        if tmpReqd:
+            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
+            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
+
+        # add any unmatched Optionals, in case they have default values defined
+        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
+
+        resultlist = []
+        for e in matchOrder:
+            loc,results = e._parse(instring,loc,doActions)
+            resultlist.append(results)
+
+        finalResults = ParseResults([])
+        for r in resultlist:
+            dups = {}
+            for k in r.keys():
+                if k in finalResults.keys():
+                    tmp = ParseResults(finalResults[k])
+                    tmp += ParseResults(r[k])
+                    dups[k] = tmp
+            finalResults += ParseResults(r)
+            for k,v in dups.items():
+                finalResults[k] = v
+        return loc, finalResults
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
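+
+# Illustrative Each usage - '&' matches all expressions, in any input order;
+# indicative only and assumes Word/alphas/nums defined elsewhere:
+#   shape = Word(alphas)
+#   size = Word(nums)
+#   (shape & size).parseString("12 red")       # -> ['12', 'red']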
+
+
+class ParseElementEnhance(ParserElement):
+    """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
+    def __init__( self, expr, savelist=False ):
+        super(ParseElementEnhance,self).__init__(savelist)
+        if isinstance( expr, basestring ):
+            expr = Literal(expr)
+        self.expr = expr
+        self.strRepr = None
+        if expr is not None:
+            self.mayIndexError = expr.mayIndexError
+            self.mayReturnEmpty = expr.mayReturnEmpty
+            self.setWhitespaceChars( expr.whiteChars )
+            self.skipWhitespace = expr.skipWhitespace
+            self.saveAsList = expr.saveAsList
+            self.callPreparse = expr.callPreparse
+            self.ignoreExprs.extend(expr.ignoreExprs)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr is not None:
+            return self.expr._parse( instring, loc, doActions, callPreParse=False )
+        else:
+            raise ParseException("",loc,self.errmsg,self)
+
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        if self.expr is not None:
+            self.expr = self.expr.copy()
+            self.expr.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseElementEnhance, self).ignore( other )
+                if self.expr is not None:
+                    self.expr.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseElementEnhance, self).ignore( other )
+            if self.expr is not None:
+                self.expr.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def streamline( self ):
+        super(ParseElementEnhance,self).streamline()
+        if self.expr is not None:
+            self.expr.streamline()
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        if self in parseElementList:
+            raise RecursiveGrammarException( parseElementList+[self] )
+        subRecCheckList = parseElementList[:] + [ self ]
+        if self.expr is not None:
+            self.expr.checkRecursion( subRecCheckList )
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        if self.expr is not None:
+            self.expr.validate(tmp)
+        self.checkRecursion( [] )
+
+    def __str__( self ):
+        try:
+            return super(ParseElementEnhance,self).__str__()
+        except:
+            pass
+
+        if self.strRepr is None and self.expr is not None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
+        return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+    """Lookahead matching of the given parse expression.  C{FollowedBy}
+    does *not* advance the parsing position within the input string, it only
+    verifies that the specified parse expression matches at the current
+    position.  C{FollowedBy} always returns a null token list."""
+    def __init__( self, expr ):
+        super(FollowedBy,self).__init__(expr)
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self.expr.tryParse( instring, loc )
+        return loc, []
+
+
+class NotAny(ParseElementEnhance):
+    """Lookahead to disallow matching with the given parse expression.  C{NotAny}
+    does *not* advance the parsing position within the input string, it only
+    verifies that the specified parse expression does *not* match at the current
+    position.  Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
+    always returns a null token list.  May be constructed using the '~' operator."""
+    def __init__( self, expr ):
+        super(NotAny,self).__init__(expr)
+        #~ self.leaveWhitespace()
+        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+        self.mayReturnEmpty = True
+        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            self.expr.tryParse( instring, loc )
+        except (ParseException,IndexError):
+            pass
+        else:
+            #~ raise ParseException(instring, loc, self.errmsg )
+            exc = self.myException
+            exc.loc = loc
+            exc.pstr = instring
+            raise exc
+        return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+        return self.strRepr
+
+
+class ZeroOrMore(ParseElementEnhance):
+    """Optional repetition of zero or more of the given expression."""
+    def __init__( self, expr ):
+        super(ZeroOrMore,self).__init__(expr)
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        tokens = []
+        try:
+            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
+            while 1:
+                if hasIgnoreExprs:
+                    preloc = self._skipIgnorables( instring, loc )
+                else:
+                    preloc = loc
+                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
+                if tmptokens or tmptokens.keys():
+                    tokens += tmptokens
+        except (ParseException,IndexError):
+            pass
+
+        return loc, tokens
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+        return self.strRepr
+
+    def setResultsName( self, name, listAllMatches=False ):
+        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
+        ret.saveAsList = True
+        return ret
+
+
+class OneOrMore(ParseElementEnhance):
+    """Repetition of one or more of the given expression."""
+    def parseImpl( self, instring, loc, doActions=True ):
+        # must be at least one
+        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+        try:
+            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
+            while 1:
+                if hasIgnoreExprs:
+                    preloc = self._skipIgnorables( instring, loc )
+                else:
+                    preloc = loc
+                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
+                if tmptokens or tmptokens.keys():
+                    tokens += tmptokens
+        except (ParseException,IndexError):
+            pass
+
+        return loc, tokens
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+        return self.strRepr
+
+    def setResultsName( self, name, listAllMatches=False ):
+        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
+        ret.saveAsList = True
+        return ret
+
+class _NullToken(object):
+    def __bool__(self):
+        return False
+    __nonzero__ = __bool__
+    def __str__(self):
+        return ""
+
+_optionalNotMatched = _NullToken()
+class Optional(ParseElementEnhance):
+    """Optional matching of the given expression.
+       A default return string can also be specified, if the optional expression
+       is not found.
+    """
+    def __init__( self, exprs, default=_optionalNotMatched ):
+        super(Optional,self).__init__( exprs, savelist=False )
+        self.defaultValue = default
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+        except (ParseException,IndexError):
+            if self.defaultValue is not _optionalNotMatched:
+                if self.expr.resultsName:
+                    tokens = ParseResults([ self.defaultValue ])
+                    tokens[self.expr.resultsName] = self.defaultValue
+                else:
+                    tokens = [ self.defaultValue ]
+            else:
+                tokens = []
+        return loc, tokens
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]"
+
+        return self.strRepr
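+
+# Illustrative Optional usage - indicative only; assumes Word/nums defined
+# elsewhere in this module:
+#   zipCode = Word(nums, exact=5) + Optional("-" + Word(nums, exact=4))
+#   zipCode.parseString("12345")               # -> ['12345']
+#   zipCode.parseString("12345-6789")          # -> ['12345', '-', '6789']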
+
+
+class SkipTo(ParseElementEnhance):
+    """Token for skipping over all undefined text until the matched expression is found.
+       If C{include} is set to true, the matched expression is also parsed (the skipped text
+       and matched expression are returned as a 2-element list).  The C{ignore}
+       argument is used to define grammars (typically quoted strings and comments) that
+       might contain false matches.
+    """
+    def __init__( self, other, include=False, ignore=None, failOn=None ):
+        super( SkipTo, self ).__init__( other )
+        self.ignoreExpr = ignore
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.includeMatch = include
+        self.asList = False
+        if failOn is not None and isinstance(failOn, basestring):
+            self.failOn = Literal(failOn)
+        else:
+            self.failOn = failOn
+        self.errmsg = "No match found for "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        startLoc = loc
+        instrlen = len(instring)
+        expr = self.expr
+        failParse = False
+        while loc <= instrlen:
+            try:
+                if self.failOn:
+                    try:
+                        self.failOn.tryParse(instring, loc)
+                    except ParseBaseException:
+                        pass
+                    else:
+                        failParse = True
+                        raise ParseException(instring, loc, "Found expression " + str(self.failOn))
+                    failParse = False
+                if self.ignoreExpr is not None:
+                    while 1:
+                        try:
+                            loc = self.ignoreExpr.tryParse(instring,loc)
+                            # print "found ignoreExpr, advance to", loc
+                        except ParseBaseException:
+                            break
+                expr._parse( instring, loc, doActions=False, callPreParse=False )
+                skipText = instring[startLoc:loc]
+                if self.includeMatch:
+                    loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
+                    if mat:
+                        skipRes = ParseResults( skipText )
+                        skipRes += mat
+                        return loc, [ skipRes ]
+                    else:
+                        return loc, [ skipText ]
+                else:
+                    return loc, [ skipText ]
+            except (ParseException,IndexError):
+                if failParse:
+                    raise
+                else:
+                    loc += 1
+        exc = self.myException
+        exc.loc = loc
+        exc.pstr = instring
+        raise exc
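+
+# Illustrative SkipTo usage - indicative only; assumes Literal defined elsewhere
+# in this module:
+#   payload = SkipTo(Literal(";"), include=True)
+#   payload.parseString("anything at all;")    # skipped text plus the matched ';'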
+
+class Forward(ParseElementEnhance):
+    """Forward declaration of an expression to be defined later -
+       used for recursive grammars, such as algebraic infix notation.
+       When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
+
+       Note: take care when assigning to C{Forward} not to overlook precedence of operators.
+       Specifically, '|' has a lower precedence than '<<', so that::
+          fwdExpr << a | b | c
+       will actually be evaluated as::
+          (fwdExpr << a) | b | c
+       thereby leaving b and c out as parseable alternatives.  It is recommended that you
+       explicitly group the values inserted into the C{Forward}::
+          fwdExpr << (a | b | c)
+    """
+    def __init__( self, other=None ):
+        super(Forward,self).__init__( other, savelist=False )
+
+    def __lshift__( self, other ):
+        if isinstance( other, basestring ):
+            other = Literal(other)
+        self.expr = other
+        self.mayReturnEmpty = other.mayReturnEmpty
+        self.strRepr = None
+        self.mayIndexError = self.expr.mayIndexError
+        self.mayReturnEmpty = self.expr.mayReturnEmpty
+        self.setWhitespaceChars( self.expr.whiteChars )
+        self.skipWhitespace = self.expr.skipWhitespace
+        self.saveAsList = self.expr.saveAsList
+        self.ignoreExprs.extend(self.expr.ignoreExprs)
+        return None
+
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        return self
+
+    def streamline( self ):
+        if not self.streamlined:
+            self.streamlined = True
+            if self.expr is not None:
+                self.expr.streamline()
+        return self
+
+    def validate( self, validateTrace=[] ):
+        if self not in validateTrace:
+            tmp = validateTrace[:]+[self]
+            if self.expr is not None:
+                self.expr.validate(tmp)
+        self.checkRecursion([])
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        self._revertClass = self.__class__
+        self.__class__ = _ForwardNoRecurse
+        try:
+            if self.expr is not None:
+                retString = _ustr(self.expr)
+            else:
+                retString = "None"
+        finally:
+            self.__class__ = self._revertClass
+        return self.__class__.__name__ + ": " + retString
+
+    def copy(self):
+        if self.expr is not None:
+            return super(Forward,self).copy()
+        else:
+            ret = Forward()
+            ret << self
+            return ret
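+
+# Illustrative Forward usage - a recursive grammar for nested parentheses;
+# indicative only and assumes Word/alphas/Group/Suppress defined in this module:
+#   nested = Forward()
+#   atom = Word(alphas) | Group(Suppress("(") + nested + Suppress(")"))
+#   nested << ZeroOrMore(atom)
+#   nested.parseString("(a (b c))")            # -> [['a', ['b', 'c']]]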
+
+class _ForwardNoRecurse(Forward):
+    def __str__( self ):
+        return "..."
+
+class TokenConverter(ParseElementEnhance):
+    """Abstract subclass of C{ParseExpression}, for converting parsed results."""
+    def __init__( self, expr, savelist=False ):
+        super(TokenConverter,self).__init__( expr )#, savelist )
+        self.saveAsList = False
+
+class Upcase(TokenConverter):
+    """Converter to upper case all matching tokens."""
+    def __init__(self, *args):
+        super(Upcase,self).__init__(*args)
+        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
+                       DeprecationWarning,stacklevel=2)
+
+    def postParse( self, instring, loc, tokenlist ):
+        return list(map( string.upper, tokenlist ))
+
+
+class Combine(TokenConverter):
+    """Converter to concatenate all matching tokens to a single string.
+       By default, the matching patterns must also be contiguous in the input string;
+       this can be disabled by specifying C{'adjacent=False'} in the constructor.
+    """
+    def __init__( self, expr, joinString="", adjacent=True ):
+        super(Combine,self).__init__( expr )
+        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+        if adjacent:
+            self.leaveWhitespace()
+        self.adjacent = adjacent
+        self.skipWhitespace = True
+        self.joinString = joinString
+        self.callPreparse = True
+
+    def ignore( self, other ):
+        if self.adjacent:
+            ParserElement.ignore(self, other)
+        else:
+            super( Combine, self).ignore( other )
+        return self
+
+    def postParse( self, instring, loc, tokenlist ):
+        retToks = tokenlist.copy()
+        del retToks[:]
+        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+        if self.resultsName and len(retToks.keys())>0:
+            return [ retToks ]
+        else:
+            return retToks
+
+class Group(TokenConverter):
+    """Converter to return the matched tokens as a list - useful for returning tokens of C{ZeroOrMore} and C{OneOrMore} expressions."""
+    def __init__( self, expr ):
+        super(Group,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        return [ tokenlist ]
+
+class Dict(TokenConverter):
+    """Converter to return a repetitive expression as a list, but also as a dictionary.
+       Each element can also be referenced using the first token in the expression as its key.
+       Useful for tabular report scraping when the first column can be used as an item key.
+    """
+    def __init__( self, exprs ):
+        super(Dict,self).__init__( exprs )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        for i,tok in enumerate(tokenlist):
+            if len(tok) == 0:
+                continue
+            ikey = tok[0]
+            if isinstance(ikey,int):
+                ikey = _ustr(tok[0]).strip()
+            if len(tok)==1:
+                tokenlist[ikey] = _ParseResultsWithOffset("",i)
+            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
+                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
+            else:
+                dictvalue = tok.copy() #ParseResults(i)
+                del dictvalue[0]
+                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
+                else:
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
+
+        if self.resultsName:
+            return [ tokenlist ]
+        else:
+            return tokenlist
+
+
+class Suppress(TokenConverter):
+    """Converter for ignoring the results of a parsed expression."""
+    def postParse( self, instring, loc, tokenlist ):
+        return []
+
+    def suppress( self ):
+        return self
+
+
+class OnlyOnce(object):
+    """Wrapper for parse actions, to ensure they are only called once."""
+    def __init__(self, methodCall):
+        self.callable = _trim_arity(methodCall)
+        self.called = False
+    def __call__(self,s,l,t):
+        if not self.called:
+            results = self.callable(s,l,t)
+            self.called = True
+            return results
+        raise ParseException(s,l,"")
+    def reset(self):
+        self.called = False
+
+def traceParseAction(f):
+    """Decorator for debugging parse actions."""
+    f = _trim_arity(f)
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s,l,t = paArgs[-3:]
+        if len(paArgs)>3:
+            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
+        try:
+            ret = f(*paArgs)
+        except Exception:
+            exc = sys.exc_info()[1]
+            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
+            raise
+        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
+        return ret
+    try:
+        z.__name__ = f.__name__
+    except AttributeError:
+        pass
+    return z
+
+#
+# global helpers
+#
+def delimitedList( expr, delim=",", combine=False ):
+    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
+       By default, the list elements and delimiters can have intervening whitespace and
+       comments, but this can be overridden by passing C{combine=True} in the constructor.
+       If C{combine} is set to True, the matching tokens are returned as a single token
+       string, with the delimiters included; otherwise, the matching tokens are returned
+       as a list of tokens, with the delimiters suppressed.
+    """
+    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
+    if combine:
+        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
+    else:
+        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
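+
+# Illustrative delimitedList usage - indicative only; assumes Word/alphas defined
+# elsewhere in this module:
+#   delimitedList(Word(alphas)).parseString("aa, bb , cc")           # -> ['aa', 'bb', 'cc']
+#   delimitedList(Word(alphas), combine=True).parseString("aa,bb")   # -> ['aa,bb']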
+
+def countedArray( expr, intExpr=None ):
+    """Helper to define a counted list of expressions.
+       This helper defines a pattern of the form::
+           integer expr expr expr...
+       where the leading integer tells how many expr expressions follow.
+       The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
+    """
+    arrayExpr = Forward()
+    def countFieldParseAction(s,l,t):
+        n = t[0]
+        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
+        return []
+    if intExpr is None:
+        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
+    else:
+        intExpr = intExpr.copy()
+    intExpr.setName("arrayLen")
+    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+    return ( intExpr + arrayExpr )
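+
+# Illustrative countedArray usage - indicative only; assumes Word/alphas defined
+# elsewhere in this module:
+#   countedArray(Word(alphas)).parseString("2 ab cd")   # -> [['ab', 'cd']]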
+
+def _flatten(L):
+    ret = []
+    for i in L:
+        if isinstance(i,list):
+            ret.extend(_flatten(i))
+        else:
+            ret.append(i)
+    return ret
+
+def matchPreviousLiteral(expr):
+    """Helper to define an expression that is indirectly defined from
+       the tokens matched in a previous expression, that is, it looks
+       for a 'repeat' of a previous expression.  For example::
+           first = Word(nums)
+           second = matchPreviousLiteral(first)
+           matchExpr = first + ":" + second
+       will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
+       previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
+       If this is not desired, use C{matchPreviousExpr}.
+       Do *not* use with packrat parsing enabled.
+    """
+    rep = Forward()
+    def copyTokenToRepeater(s,l,t):
+        if t:
+            if len(t) == 1:
+                rep << t[0]
+            else:
+                # flatten t tokens
+                tflat = _flatten(t.asList())
+                rep << And( [ Literal(tt) for tt in tflat ] )
+        else:
+            rep << Empty()
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    return rep
+
+def matchPreviousExpr(expr):
+    """Helper to define an expression that is indirectly defined from
+       the tokens matched in a previous expression, that is, it looks
+       for a 'repeat' of a previous expression.  For example::
+           first = Word(nums)
+           second = matchPreviousExpr(first)
+           matchExpr = first + ":" + second
+       will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
+       expressions, it will *not* match the leading C{"1:1"} in C{"1:10"};
+       the expressions are evaluated first, and then compared, so
+       C{"1"} is compared with C{"10"}.
+       Do *not* use with packrat parsing enabled.
+    """
+    rep = Forward()
+    e2 = expr.copy()
+    rep << e2
+    def copyTokenToRepeater(s,l,t):
+        matchTokens = _flatten(t.asList())
+        def mustMatchTheseTokens(s,l,t):
+            theseTokens = _flatten(t.asList())
+            if  theseTokens != matchTokens:
+                raise ParseException("",0,"")
+        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    return rep
+
+def _escapeRegexRangeChars(s):
+    #~  escape these chars: ^-]
+    for c in r"\^-]":
+        s = s.replace(c,_bslash+c)
+    s = s.replace("\n",r"\n")
+    s = s.replace("\t",r"\t")
+    return _ustr(s)
+
+def oneOf( strs, caseless=False, useRegex=True ):
+    """Helper to quickly define a set of alternative Literals, and makes sure to do
+       longest-first testing when there is a conflict, regardless of the input order,
+       but returns a C{MatchFirst} for best performance.
+
+       Parameters:
+        - strs - a string of space-delimited literals, or a list of string literals
+        - caseless - (default=False) - treat all literals as caseless
+        - useRegex - (default=True) - as an optimization, will generate a Regex
+          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
+          if creating a C{Regex} raises an exception)
+    """
+    if caseless:
+        isequal = ( lambda a,b: a.upper() == b.upper() )
+        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
+        parseElementClass = CaselessLiteral
+    else:
+        isequal = ( lambda a,b: a == b )
+        masks = ( lambda a,b: b.startswith(a) )
+        parseElementClass = Literal
+
+    if isinstance(strs,(list,tuple)):
+        symbols = list(strs[:])
+    elif isinstance(strs,basestring):
+        symbols = strs.split()
+    else:
+        warnings.warn("Invalid argument to oneOf, expected string or list",
+                SyntaxWarning, stacklevel=2)
+
+    i = 0
+    while i < len(symbols)-1:
+        cur = symbols[i]
+        for j,other in enumerate(symbols[i+1:]):
+            if ( isequal(other, cur) ):
+                del symbols[i+j+1]
+                break
+            elif ( masks(cur, other) ):
+                del symbols[i+j+1]
+                symbols.insert(i,other)
+                cur = other
+                break
+        else:
+            i += 1
+
+    if not caseless and useRegex:
+        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
+        try:
+            if len(symbols)==len("".join(symbols)):
+                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
+            else:
+                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
+        except:
+            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+                    SyntaxWarning, stacklevel=2)
+
+
+    # last resort, just use MatchFirst
+    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
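+
+# Illustrative oneOf usage - indicative only:
+#   comparisonOp = oneOf("< = > <= >= !=")     # '<=' is tested before '<'
+#   comparisonOp.parseString("<=")             # -> ['<=']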
+
+def dictOf( key, value ):
+    """Helper to easily and clearly define a dictionary by specifying the respective patterns
+       for the key and value.  Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens
+       in the proper order.  The key pattern can include delimiting markers or punctuation,
+       as long as they are suppressed, thereby leaving the significant key text.  The value
+       pattern can include named results, so that the C{Dict} results can include named token
+       fields.
+    """
+    return Dict( ZeroOrMore( Group ( key + value ) ) )
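+
+# Illustrative dictOf usage - indicative only; assumes Word/alphas/nums/Suppress
+# defined elsewhere in this module:
+#   attrs = dictOf(Word(alphas) + Suppress(":"), Word(nums))
+#   attrs.parseString("width: 10 height: 20")["width"]   # -> '10'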
+
+def originalTextFor(expr, asString=True):
+    """Helper to return the original, untokenized text for a given expression.  Useful to
+       restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+       revert separate tokens with intervening whitespace back to the original matching
+       input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not
+       require the inspect module to chase up the call stack.  By default, returns a
+       string containing the original parsed text.
+
+       If the optional C{asString} argument is passed as C{False}, then the return value is a
+       C{ParseResults} containing any results names that were originally matched, and a
+       single token containing the original matched text from the input string.  So if
+       the expression passed to C{L{originalTextFor}} contains expressions with defined
+       results names, you must set C{asString} to C{False} if you want to preserve those
+       results name values."""
+    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+    endlocMarker = locMarker.copy()
+    endlocMarker.callPreparse = False
+    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+    if asString:
+        extractText = lambda s,l,t: s[t._original_start:t._original_end]
+    else:
+        def extractText(s,l,t):
+            del t[:]
+            t.insert(0, s[t._original_start:t._original_end])
+            del t["_original_start"]
+            del t["_original_end"]
+    matchExpr.setParseAction(extractText)
+    return matchExpr
+
+def ungroup(expr):
+    """Helper to undo pyparsing's default grouping of And expressions, even
+       if all but one are non-empty."""
+    return TokenConverter(expr).setParseAction(lambda t:t[0])
+
+# convenience constants for positional expressions
+empty       = Empty().setName("empty")
+lineStart   = LineStart().setName("lineStart")
+lineEnd     = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd   = StringEnd().setName("stringEnd")
+
+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
+_printables_less_backslash = "".join([ c for c in printables if c not in  r"\]" ])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
+
+_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
+
+def srange(s):
+    r"""Helper to easily define string ranges for use in Word construction.  Borrows
+       syntax from regexp '[]' string range definitions::
+          srange("[0-9]")   -> "0123456789"
+          srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
+          srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+       The input string must be enclosed in []'s, and the returned string is the expanded
+       character set joined into a single string.
+       The values enclosed in the []'s may be::
+          a single character
+          an escaped character with a leading backslash (such as \- or \])
+          an escaped hex character with a leading '\x' (\x21, which is a '!' character)
+            (\0x## is also supported for backwards compatibility)
+          an escaped octal character with a leading '\0' (\041, which is a '!' character)
+          a range of any of the above, separated by a dash ('a-z', etc.)
+          any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
+    """
+    try:
+        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
+    except:
+        return ""
+
+def matchOnlyAtCol(n):
+    """Helper method for defining parse actions that require matching at a specific
+       column in the input text.
+    """
+    def verifyCol(strg,locn,toks):
+        if col(locn,strg) != n:
+            raise ParseException(strg,locn,"matched token not at column %d" % n)
+    return verifyCol
+
+def replaceWith(replStr):
+    """Helper method for common parse actions that simply return a literal value.  Especially
+       useful when used with C{transformString()}.
+    """
+    def _replFunc(*args):
+        return [replStr]
+    return _replFunc
+
+def removeQuotes(s,l,t):
+    """Helper parse action for removing quotation marks from parsed quoted strings.
+       To use, add this parse action to quoted string using::
+         quotedString.setParseAction( removeQuotes )
+    """
+    return t[0][1:-1]
+
+def upcaseTokens(s,l,t):
+    """Helper parse action to convert tokens to upper case."""
+    return [ tt.upper() for tt in map(_ustr,t) ]
+
+def downcaseTokens(s,l,t):
+    """Helper parse action to convert tokens to lower case."""
+    return [ tt.lower() for tt in map(_ustr,t) ]
+
+def keepOriginalText(s,startLoc,t):
+    """DEPRECATED - use new helper method C{originalTextFor}.
+       Helper parse action to preserve original parsed text,
+       overriding any nested parse actions."""
+    try:
+        endloc = getTokensEndLoc()
+    except ParseException:
+        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
+    del t[:]
+    t += ParseResults(s[startLoc:endloc])
+    return t
+
+def getTokensEndLoc():
+    """Method to be called from within a parse action to determine the end
+       location of the parsed tokens."""
+    import inspect
+    fstack = inspect.stack()
+    try:
+        # search up the stack (through intervening argument normalizers) for correct calling routine
+        for f in fstack[2:]:
+            if f[3] == "_parseNoCache":
+                endloc = f[0].f_locals["loc"]
+                return endloc
+        else:
+            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
+    finally:
+        del fstack
+
+def _makeTags(tagStr, xml):
+    """Internal helper to construct opening and closing tag expressions, given a tag name"""
+    if isinstance(tagStr,basestring):
+        resname = tagStr
+        tagStr = Keyword(tagStr, caseless=not xml)
+    else:
+        resname = tagStr.name
+
+    tagAttrName = Word(alphas,alphanums+"_-:")
+    if (xml):
+        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    else:
+        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
+        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
+                Optional( Suppress("=") + tagAttrValue ) ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    closeTag = Combine(_L("</") + tagStr + ">")
+
+    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
+    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
+    openTag.tag = resname
+    closeTag.tag = resname
+    return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+    """Helper to construct opening and closing tag expressions for HTML, given a tag name"""
+    return _makeTags( tagStr, False )
+
+def makeXMLTags(tagStr):
+    """Helper to construct opening and closing tag expressions for XML, given a tag name"""
+    return _makeTags( tagStr, True )
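+
+# Illustrative usage sketch for makeHTMLTags (a hedged example, not part of
+# the upstream module): attributes of a matched start tag become named results.
+#
+#   a_start, a_end = makeHTMLTags("A")
+#   for toks, _, _ in a_start.scanString('<a href="http://example.com">'):
+#       print(toks.href)   # -> http://example.com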
+
+def withAttribute(*args,**attrDict):
+    """Helper to create a validating parse action to be used with start tags created
+       with C{makeXMLTags} or C{makeHTMLTags}. Use C{withAttribute} to qualify a starting tag
+       with a required attribute value, to avoid false matches on common tags such as
+       C{<TD>} or C{<DIV>}.
+
+       Call C{withAttribute} with a series of attribute names and values. Specify the list
+       of filter attribute names and values as:
+        - keyword arguments, as in C{(align="right")}, or
+        - as an explicit dict with C{**} operator, when an attribute name is also a Python
+          reserved word, as in C{**{"class":"Customer", "align":"right"}}
+        - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
+       For attribute names with a namespace prefix, you must use the second form.  Attribute
+       names are matched insensitive to upper/lower case.
+
+       To verify that the attribute exists, but without specifying a value, pass
+       C{withAttribute.ANY_VALUE} as the value.
+       """
+    if args:
+        attrs = args[:]
+    else:
+        attrs = attrDict.items()
+    attrs = [(k,v) for k,v in attrs]
+    def pa(s,l,tokens):
+        for attrName,attrValue in attrs:
+            if attrName not in tokens:
+                raise ParseException(s,l,"no matching attribute " + attrName)
+            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
+                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
+                                            (attrName, tokens[attrName], attrValue))
+    return pa
+withAttribute.ANY_VALUE = object()
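+
+# Illustrative usage sketch for withAttribute (a hedged example, not part of
+# the upstream module): only accept <td> start tags carrying align="right".
+#
+#   td_start, td_end = makeHTMLTags("td")
+#   right_td = td_start.copy().addParseAction(withAttribute(align="right"))
+#   # right_td matches '<td align="right">' but not a plain '<td>'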
+
+opAssoc = _Constants()
+opAssoc.LEFT = object()
+opAssoc.RIGHT = object()
+
+def operatorPrecedence( baseExpr, opList ):
+    """Helper method for constructing grammars of expressions made up of
+       operators working in a precedence hierarchy.  Operators may be unary or
+       binary, left- or right-associative.  Parse actions can also be attached
+       to operator expressions.
+
+       Parameters:
+        - baseExpr - expression representing the most basic element of the nested expression grammar
+        - opList - list of tuples, one for each operator precedence level in the
+          expression grammar; each tuple is of the form
+          (opExpr, numTerms, rightLeftAssoc, parseAction), where:
+           - opExpr is the pyparsing expression for the operator;
+              may also be a string, which will be converted to a Literal;
+              if numTerms is 3, opExpr is a tuple of two expressions, for the
+              two operators separating the 3 terms
+           - numTerms is the number of terms for this operator (must
+              be 1, 2, or 3)
+           - rightLeftAssoc indicates whether the operator is
+              right- or left-associative, using the pyparsing-defined
+              constants opAssoc.RIGHT and opAssoc.LEFT.
+           - parseAction is the parse action to be associated with
+              expressions matching this operator expression (the
+              parse action tuple member may be omitted)
+    """
+    ret = Forward()
+    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
+    for i,operDef in enumerate(opList):
+        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
+        if arity == 3:
+            if opExpr is None or len(opExpr) != 2:
+                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
+            opExpr1, opExpr2 = opExpr
+        thisExpr = Forward()#.setName("expr%d" % i)
+        if rightLeftAssoc == opAssoc.LEFT:
+            if arity == 1:
+                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
+                else:
+                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
+            elif arity == 3:
+                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
+                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
+            else:
+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+        elif rightLeftAssoc == opAssoc.RIGHT:
+            if arity == 1:
+                # try to avoid LR with this extra test
+                if not isinstance(opExpr, Optional):
+                    opExpr = Optional(opExpr)
+                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
+                else:
+                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
+            elif arity == 3:
+                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
+                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
+            else:
+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+        else:
+            raise ValueError("operator must indicate right or left associativity")
+        if pa:
+            matchExpr.setParseAction( pa )
+        thisExpr << ( matchExpr | lastExpr )
+        lastExpr = thisExpr
+    ret << lastExpr
+    return ret
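+
+# Illustrative usage sketch for operatorPrecedence (a hedged example, not part
+# of the upstream module): a two-level arithmetic grammar, '*'/'/' binding
+# tighter than '+'/'-'.
+#
+#   integer = Word(nums).setParseAction(lambda t: int(t[0]))
+#   arith = operatorPrecedence(integer, [
+#       (oneOf("* /"), 2, opAssoc.LEFT),
+#       (oneOf("+ -"), 2, opAssoc.LEFT),
+#   ])
+#   arith.parseString("1 + 2 * 3").asList()   # -> [[1, '+', [2, '*', 3]]]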
+
+dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
+sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
+quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
+unicodeString = Combine(_L('u') + quotedString.copy())
+
+def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
+    """Helper method for defining nested lists enclosed in opening and closing
+       delimiters ("(" and ")" are the default).
+
+       Parameters:
+        - opener - opening character for a nested list (default="("); can also be a pyparsing expression
+        - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
+        - content - expression for items within the nested lists (default=None)
+        - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
+
+       If an expression is not provided for the content argument, the nested
+       expression will capture all whitespace-delimited content between delimiters
+       as a list of separate values.
+
+       Use the C{ignoreExpr} argument to define expressions that may contain
+       opening or closing characters that should not be treated as opening
+       or closing characters for nesting, such as quotedString or a comment
+       expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
+       The default is L{quotedString}, but if no expressions are to be ignored,
+       then pass C{None} for this argument.
+    """
+    if opener == closer:
+        raise ValueError("opening and closing strings cannot be the same")
+    if content is None:
+        if isinstance(opener,basestring) and isinstance(closer,basestring):
+            if len(opener) == 1 and len(closer)==1:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr +
+                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
+                                ).setParseAction(lambda t:t[0].strip()))
+            else:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr +
+                                    ~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+        else:
+            raise ValueError("opening and closing arguments must be strings if no content expression is given")
+    ret = Forward()
+    if ignoreExpr is not None:
+        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
+    else:
+        ret << Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
+    return ret
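+
+# Illustrative usage sketch for nestedExpr (a hedged example, not part of the
+# upstream module): parenthesized groups nest into sub-lists.
+#
+#   nestedExpr().parseString("(a (b c) d)").asList()
+#   # -> [['a', ['b', 'c'], 'd']]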
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True):
+    """Helper method for defining space-delimited indentation blocks, such as
+       those used to define block statements in Python source code.
+
+       Parameters:
+        - blockStatementExpr - expression defining syntax of statement that
+            is repeated within the indented block
+        - indentStack - list created by caller to manage indentation stack
+            (multiple statementWithIndentedBlock expressions within a single grammar
+            should share a common indentStack)
+        - indent - boolean indicating whether block must be indented beyond the
+            current level; set to False for block of left-most statements
+            (default=True)
+
+       A valid block must contain at least one C{blockStatement}.
+    """
+    def checkPeerIndent(s,l,t):
+        if l >= len(s): return
+        curCol = col(l,s)
+        if curCol != indentStack[-1]:
+            if curCol > indentStack[-1]:
+                raise ParseFatalException(s,l,"illegal nesting")
+            raise ParseException(s,l,"not a peer entry")
+
+    def checkSubIndent(s,l,t):
+        curCol = col(l,s)
+        if curCol > indentStack[-1]:
+            indentStack.append( curCol )
+        else:
+            raise ParseException(s,l,"not a subentry")
+
+    def checkUnindent(s,l,t):
+        if l >= len(s): return
+        curCol = col(l,s)
+        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
+            raise ParseException(s,l,"not an unindent")
+        indentStack.pop()
+
+    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
+    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
+    PEER   = Empty().setParseAction(checkPeerIndent)
+    UNDENT = Empty().setParseAction(checkUnindent)
+    if indent:
+        smExpr = Group( Optional(NL) +
+            #~ FollowedBy(blockStatementExpr) +
+            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
+    else:
+        smExpr = Group( Optional(NL) +
+            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
+    blockStatementExpr.ignore(_bslash + LineEnd())
+    return smExpr
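+
+# Illustrative usage sketch for indentedBlock (a rough, hedged example, not
+# part of the upstream module): a "def name:" header owning an indented body
+# of statements, with the shared indentStack tracking indentation levels.
+#
+#   indentStack = [1]
+#   stmt = Forward()
+#   identifier = Word(alphas, alphanums + "_")
+#   funcDecl = Group(Keyword("def") + identifier + Suppress(":"))
+#   funcBody = indentedBlock(stmt, indentStack)
+#   stmt << (Group(funcDecl + funcBody) | identifier)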
+
+alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
+punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
+
+anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
+commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
+_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
+replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
+
+htmlComment = Regex(r"<!--[\s\S]*?-->")
+restOfLine = Regex(r".*").leaveWhitespace()
+dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
+cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
+
+javaStyleComment = cppStyleComment
+pythonStyleComment = Regex(r"#.*").setName("Python style comment")
+_noncomma = "".join( [ c for c in printables if c != "," ] )
+_commasepitem = Combine(OneOrMore(Word(_noncomma) +
+                                  Optional( Word(" \t") +
+                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
+commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
+
+
+if __name__ == "__main__":
+
+    def test( teststring ):
+        try:
+            tokens = simpleSQL.parseString( teststring )
+            tokenlist = tokens.asList()
+            print (teststring + "->"   + str(tokenlist))
+            print ("tokens = "         + str(tokens))
+            print ("tokens.columns = " + str(tokens.columns))
+            print ("tokens.tables = "  + str(tokens.tables))
+            print (tokens.asXML("SQL",True))
+        except ParseBaseException:
+            err = sys.exc_info()[1]
+            print (teststring + "->")
+            print (err.line)
+            print (" "*(err.column-1) + "^")
+            print (err)
+        print()
+
+    selectToken    = CaselessLiteral( "select" )
+    fromToken      = CaselessLiteral( "from" )
+
+    ident          = Word( alphas, alphanums + "_$" )
+    columnName     = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
+    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
+    tableName      = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
+    tableNameList  = Group( delimitedList( tableName ) )#.setName("tables")
+    simpleSQL      = ( selectToken + \
+                     ( '*' | columnNameList ).setResultsName( "columns" ) + \
+                     fromToken + \
+                     tableNameList.setResultsName( "tables" ) )
+
+    test( "SELECT * from XYZZY, ABC" )
+    test( "select * from SYS.XYZZY" )
+    test( "Select A from Sys.dual" )
+    test( "Select AA,BB,CC from Sys.dual" )
+    test( "Select A, B, C from Sys.dual" )
+    test( "Select A, B, C from Sys.dual" )
+    test( "Xelect A, B, C from Sys.dual" )
+    test( "Select A, B, C frox Sys.dual" )
+    test( "Select" )
+    test( "Select ^^^ frox Sys.dual" )
+    test( "Select A, B, C from Sys.dual, Table2   " )
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..de01cc1
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,3 @@
+[nosetests]
+where = utest
+exe = True
diff --git a/template_utils.py b/template_utils.py
new file mode 100644
index 0000000..1db0292
--- /dev/null
+++ b/template_utils.py
@@ -0,0 +1,90 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import os
+import sys
+
+import tenjin
+
+""" @brief utilities for rendering templates
+"""
+
+def render_template(out, name, path, context, prefix = None):
+    """
+    Render a template using tenjin.
+    out: a file-like object
+    name: name of the template
+    path: array of directories to search for the template
+    context: dictionary of variables to pass to the template
+    prefix: optional prefix to use for embedding (for languages other than Python)
+    """
+    pp = [ tenjin.PrefixedLinePreprocessor(prefix=prefix) if prefix else tenjin.PrefixedLinePreprocessor() ] # support "::" syntax
+    template_globals = { "to_str": str, "escape": str } # disable HTML escaping
+    engine = TemplateEngine(path=path, pp=pp)
+    out.write(engine.render(name, context, template_globals))
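+
+# Illustrative usage sketch (a hedged example; the template name and search
+# path below are hypothetical, not part of this module):
+#
+#   import sys
+#   render_template(sys.stdout, "example.java", ["java_gen/templates"],
+#                   {"msg": "hello"}, prefix="//::")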
+
+def render_static(out, name, path):
+    """
+    Write out a static template.
+    out: a file-like object
+    name: name of the template
+    path: array of directories to search for the template
+    """
+    # Reuse the tenjin logic for finding the template
+    template_filename = tenjin.FileSystemLoader().find(name, path)
+    if not template_filename:
+        raise ValueError("template %s not found" % name)
+    with open(template_filename) as infile:
+        out.write(infile.read())
+
+class TemplateEngine(tenjin.Engine):
+    def include(self, template_name, **kwargs):
+        """
+        Tenjin has an issue with nested includes that use the same local variable
+        names, because it uses the same context dict for each level of nesting.
+        The fix is to copy the context.
+        """
+        frame = sys._getframe(1)
+        locals  = frame.f_locals
+        globals = frame.f_globals
+        context = locals["_context"].copy()
+        context.update(kwargs)
+        template = self.get_template(template_name, context, globals)
+        return template.render(context, globals, _buf=locals["_buf"])
+
+def open_output(install_dir, name):
+    """
+    Open an output file for writing
+
+    'name' may include slashes. Subdirectories will be automatically created.
+    """
+    print "Writing %s" % name
+    path = os.path.join(install_dir, name)
+    dirpath = os.path.dirname(path)
+    if not os.path.exists(dirpath):
+        os.makedirs(dirpath)
+    return open(path, "w")
diff --git a/test_data/__init__.py b/test_data/__init__.py
new file mode 100644
index 0000000..dc063fa
--- /dev/null
+++ b/test_data/__init__.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import fnmatch
+import os
+
+_test_data_dir = os.path.dirname(os.path.realpath(__file__))
+
+def list_files():
+    """
+    Return a list of the data files in this directory
+
+    These strings are suitable to be passed to read().
+    """
+
+    result = []
+    for dirname, dirnames, filenames in os.walk(_test_data_dir):
+        dirname = os.path.relpath(dirname, _test_data_dir)
+        for filename in filenames:
+            if filename.endswith('.data') and not filename.startswith('.'):
+                result.append(dirname + '/' + filename)
+    return sorted(result)
+
+def glob(pattern):
+    for f in list_files():
+        if fnmatch.fnmatch(f, pattern):
+            yield f
+
+def exists(name):
+    return os.path.exists(os.path.join(_test_data_dir, name))
+
+def read(name):
+    """
+    Read, parse, and return a test data file
+
+    @param name Filename relative to the test_data directory
+    @returns A hash from section to the string contents
+
+    A section named "binary" is treated specially: its contents are
+    parsed as a hex dump into a binary string.
+    """
+
+    section_lines = {}
+    cur_section = None
+
+    with open(os.path.join(_test_data_dir, name)) as f:
+        for line in f:
+            line = line.rstrip().partition('#')[0].rstrip()
+            if line == '':
+                continue
+            elif line.startswith('--'):
+                cur_section = line[2:].strip()
+                if cur_section in section_lines:
+                    raise Exception("section %s already exists in the test data file" % cur_section)
+                section_lines[cur_section] = []
+            elif cur_section:
+                section_lines[cur_section].append(line)
+    data = { section: '\n'.join(lines) for (section, lines) in section_lines.items() }
+
+    # Special case: convert 'binary' section into binary
+    # The string '00 11\n22 33' results in "\x00\x11\x22\x33"
+    if 'binary' in data:
+        hex_strs = data['binary'].split()
+        data['binary'] = ''.join(map(lambda x: chr(int(x, 16)), hex_strs))
+
+    return data
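+
+# Illustrative usage sketch (a hedged example, not part of this module): load
+# one of the .data files below from a unit test.
+#
+#   import test_data
+#   data = test_data.read('of10/hello.data')
+#   data['binary']   # the hex-dump section parsed into a binary string
+#   data['python']   # the python constructor expression for the message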
diff --git a/test_data/action_output.data b/test_data/action_output.data
new file mode 100644
index 0000000..3afb525
--- /dev/null
+++ b/test_data/action_output.data
@@ -0,0 +1,9 @@
+-- binary
+00 00 # type
+00 08 # len
+ff f8 # in_port
+ff ff # max_len
+-- python
+ofp.action.output(port=ofp.OFPP_IN_PORT, max_len=0xffff)
+-- python pretty-printer
+output { port = OFPP_IN_PORT, max_len = 0xffff }
diff --git a/test_data/example.data b/test_data/example.data
new file mode 100644
index 0000000..d6186e8
--- /dev/null
+++ b/test_data/example.data
@@ -0,0 +1,17 @@
+# Comment outside section
+# Another comment
+
+# That was a blank line
+This is not a blank line
+-- section1
+ abc def
+ghi
+-- section2
+123
+456 # comment
+# comment inside a section
+789
+-- binary
+00 01 02 03 04 05 06 07 # comment
+77 66 55 44 33 22 11 00
+ # comment in binary
diff --git a/test_data/of10/action_bsn_set_tunnel_dst.data b/test_data/of10/action_bsn_set_tunnel_dst.data
new file mode 100644
index 0000000..7a5747c
--- /dev/null
+++ b/test_data/of10/action_bsn_set_tunnel_dst.data
@@ -0,0 +1,13 @@
+-- binary
+ff ff # type
+00 10 # len
+00 5c 16 c7 # experimenter
+00 00 00 02 # subtype
+12 34 56 78 # dst
+-- python
+ofp.action.bsn_set_tunnel_dst(dst=0x12345678)
+-- python pretty-printer
+bsn_set_tunnel_dst { dst = 0x12345678 }
+-- c
+obj = of_action_bsn_set_tunnel_dst_new(OF_VERSION_1_0);
+of_action_bsn_set_tunnel_dst_dst_set(obj, 0x12345678);
diff --git a/test_data/of10/desc_stats_reply.data b/test_data/of10/desc_stats_reply.data
new file mode 100644
index 0000000..c000bb0
--- /dev/null
+++ b/test_data/of10/desc_stats_reply.data
@@ -0,0 +1,171 @@
+-- binary
+01 11 # version / type
+04 2c # length
+00 00 00 03 # xid
+00 00 # stats_type
+00 01 # flags
+54 68 65 20 49 6e 64 69 # mfr_desc
+67 6f 2d 32 20 43 6f 6d # ...
+6d 75 6e 69 74 79 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+55 6e 6b 6e 6f 77 6e 20 # hw_desc
+73 65 72 76 65 72 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+49 6e 64 69 67 6f 2d 32 # sw_desc
+20 4c 52 49 20 70 72 65 # ...
+2d 72 65 6c 65 61 73 65 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+31 31 32 33 35 38 31 33 # serial_num
+32 31 33 34 35 35 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+49 6e 64 69 67 6f 2d 32 # dp_desc
+20 4c 52 49 20 66 6f 72 # ...
+77 61 72 64 69 6e 67 20 # ...
+6d 6f 64 75 6c 65 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+-- python
+ofp.message.desc_stats_reply(
+    xid=3,
+    flags=ofp.OFPSF_REPLY_MORE,
+    mfr_desc="The Indigo-2 Community",
+    hw_desc="Unknown server",
+    sw_desc="Indigo-2 LRI pre-release",
+    serial_num="11235813213455",
+    dp_desc="Indigo-2 LRI forwarding module")
+-- c
+obj = of_desc_stats_reply_new(OF_VERSION_1_0);
+of_desc_stats_reply_xid_set(obj, 3);
+of_desc_stats_reply_flags_set(obj, OF_STATS_REPLY_FLAG_REPLY_MORE);
+{
+    of_desc_str_t mfr_desc = "The Indigo-2 Community";
+    of_desc_stats_reply_mfr_desc_set(obj, mfr_desc);
+}
+{
+    of_desc_str_t hw_desc = "Unknown server";
+    of_desc_stats_reply_hw_desc_set(obj, hw_desc);
+}
+{
+    of_desc_str_t sw_desc = "Indigo-2 LRI pre-release";
+    of_desc_stats_reply_sw_desc_set(obj, sw_desc);
+}
+{
+    of_desc_str_t dp_desc = "Indigo-2 LRI forwarding module";
+    of_desc_stats_reply_dp_desc_set(obj, dp_desc);
+}
+{
+    of_serial_num_t serial_num = "11235813213455";
+    of_desc_stats_reply_serial_num_set(obj, serial_num);
+}
diff --git a/test_data/of10/echo_request.data b/test_data/of10/echo_request.data
new file mode 100644
index 0000000..701376e
--- /dev/null
+++ b/test_data/of10/echo_request.data
@@ -0,0 +1,16 @@
+-- binary
+01 02 # version / type
+00 0b # length
+12 34 56 78 # xid
+61 62 01 # data
+-- python
+ofp.message.echo_request(xid=0x12345678, data="ab\x01")
+-- python pretty-printer
+echo_request { xid = 0x12345678, data = 'ab\x01' }
+-- c
+obj = of_echo_request_new(OF_VERSION_1_0);
+of_echo_request_xid_set(obj, 0x12345678);
+{
+    of_octets_t data = { .data=(uint8_t *)"ab\x01", .bytes=3 };
+    of_echo_request_data_set(obj, &data);
+}
diff --git a/test_data/of10/flow_add.data b/test_data/of10/flow_add.data
new file mode 100644
index 0000000..60fd780
--- /dev/null
+++ b/test_data/of10/flow_add.data
@@ -0,0 +1,158 @@
+-- binary
+01 0e # version, type
+00 70 # length
+12 34 56 78 # xid
+
+#### ofp_flow_mod
+00 30 00 e2 # wildcards=(OFPFW_DL_VLAN|OFPFW_NW_PROTO|OFPFW_TP_SRC|OFPFW_TP_DST|OFPFW_DL_VLAN_PCP|OFPFW_NW_TOS)
+00 03 # in_port
+01 23 45 67 89 ab # eth_src
+cd ef 01 23 45 67 # eth_dst
+00 00 # dl_vlan
+00 00 # dl_pcp, pad
+08 00 # dl_type
+00 00 00 00 # nw_tos, nw_proto, pad[2]
+c0 a8 03 7f # nw_src
+ff ff ff ff # nw_dst
+00 00 00 00 # tcp_src, tcp_dst
+
+00 00 00 00 00 00 00 00 # cookie
+00 00 # command
+00 05 # idle_timeout
+00 00 # hard_timeout
+00 00 # priority
+00 00 00 00 # buffer_id
+00 00 #out_port
+00 02 # flags (CHECK_OVERLAP)
+
+#list(ofp_action)
+00 00 00 08 # type=OUTPUT, len=8
+ff fb # port=FLOOD
+00 00 # maxLen=0
+ff ff 00 10 # type=VENDOR, len=16
+00 00 23 20 # vendor = Nicira
+00 12 # subtype=dec_ttl
+00 00 00 00 00 00 # pad(6)
+ff ff 00 10 # type=VENDOR, len=16
+00 5c 16 c7 # vendor = BSN
+00 00 00 02 # subtype = set_tunnel_dst
+00 00 00 00 # tunnel dst ip
+-- python
+ofp.message.flow_add(
+    xid=0x12345678,
+    match=ofp.match(
+        wildcards=ofp.OFPFW_DL_VLAN|ofp.OFPFW_NW_PROTO|ofp.OFPFW_TP_SRC|ofp.OFPFW_TP_DST|ofp.OFPFW_DL_VLAN_PCP|ofp.OFPFW_NW_TOS,
+        in_port=3,
+        eth_type=0x800,
+        ipv4_src=0xc0a8037f,
+        ipv4_dst=0xffffffff,
+        eth_src=[0x01, 0x23, 0x45, 0x67, 0x89, 0xab],
+        eth_dst=[0xcd, 0xef, 0x01, 0x23, 0x45, 0x67]),
+    idle_timeout=5,
+    flags=ofp.OFPFF_CHECK_OVERLAP,
+    actions=[
+        ofp.action.output(port=ofp.OFPP_FLOOD),
+        ofp.action.nicira_dec_ttl(),
+        ofp.action.bsn_set_tunnel_dst()])
+-- python pretty-printer
+flow_add {
+  xid = 0x12345678,
+  match = match_v1 {
+    wildcards = OFPFW_DL_VLAN|OFPFW_NW_PROTO|OFPFW_TP_SRC|OFPFW_TP_DST|OFPFW_DL_VLAN_PCP|OFPFW_NW_TOS,
+    in_port = 3,
+    eth_src = 01:23:45:67:89:ab,
+    eth_dst = cd:ef:01:23:45:67,
+    vlan_vid = 0x0,
+    vlan_pcp = 0x0,
+    eth_type = 0x800,
+    ip_dscp = 0x0,
+    ip_proto = 0x0,
+    ipv4_src = 192.168.3.127,
+    ipv4_dst = 255.255.255.255,
+    tcp_src = 0x0,
+    tcp_dst = 0x0
+  },
+  cookie = 0x0,
+  idle_timeout = 0x5,
+  hard_timeout = 0x0,
+  priority = 0x0,
+  buffer_id = 0x0,
+  out_port = 0,
+  flags = 0x2,
+  actions = [
+    output { port = OFPP_FLOOD, max_len = 0x0 },
+    nicira_dec_ttl {  },
+    bsn_set_tunnel_dst { dst = 0x0 }
+  ]
+}
+-- c
+obj = of_flow_add_new(OF_VERSION_1_0);
+of_flow_add_xid_set(obj, 0x12345678);
+of_flow_add_idle_timeout_set(obj, 5);
+of_flow_add_flags_set(obj, 2);
+{
+    of_match_t match = { OF_VERSION_1_0 };
+    match.fields.in_port = 3;
+    match.fields.eth_src = (of_mac_addr_t) { { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab } };
+    match.fields.eth_dst = (of_mac_addr_t) { { 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67 } };
+    match.fields.eth_type = 0x800;
+    match.fields.ipv4_src = 0xc0a8037f;
+    match.fields.ipv4_dst = 0xffffffff;
+    OF_MATCH_MASK_IN_PORT_EXACT_SET(&match);
+    OF_MATCH_MASK_ETH_SRC_EXACT_SET(&match);
+    OF_MATCH_MASK_ETH_DST_EXACT_SET(&match);
+    OF_MATCH_MASK_ETH_TYPE_EXACT_SET(&match);
+    //OF_MATCH_MASK_VLAN_VID_EXACT_SET(&match);
+    //OF_MATCH_MASK_VLAN_PCP_EXACT_SET(&match);
+    OF_MATCH_MASK_ETH_TYPE_EXACT_SET(&match);
+    //OF_MATCH_MASK_IP_DSCP_EXACT_SET(&match);
+    //OF_MATCH_MASK_IP_PROTO_EXACT_SET(&match);
+    OF_MATCH_MASK_IPV4_SRC_EXACT_SET(&match);
+    OF_MATCH_MASK_IPV4_DST_EXACT_SET(&match);
+    //OF_MATCH_MASK_TCP_SRC_EXACT_SET(&match);
+    //OF_MATCH_MASK_TCP_DST_EXACT_SET(&match);
+    of_flow_add_match_set(obj, &match);
+}
+{
+    of_list_action_t actions;
+    of_flow_add_actions_bind(obj, &actions);
+    {
+        of_action_t action;
+        of_action_output_init(&action.output, OF_VERSION_1_0, -1, 1);
+        of_list_action_append_bind(&actions, &action);
+        of_action_output_port_set(&action.output, OF_PORT_DEST_FLOOD);
+    }
+    {
+        of_action_t action;
+        of_action_nicira_dec_ttl_init(&action.nicira_dec_ttl, OF_VERSION_1_0, -1, 1);
+        of_list_action_append_bind(&actions, &action);
+    }
+    {
+        of_action_t action;
+        of_action_bsn_set_tunnel_dst_init(&action.bsn_set_tunnel_dst, OF_VERSION_1_0, -1, 1);
+        of_list_action_append_bind(&actions, &action);
+    }
+}
+-- java
+builder.setXid(0x12345678)
+    .setMatch(
+        factory.buildMatch()
+            .setExact(MatchField.IN_PORT, OFPort.of(3))
+            .setExact(MatchField.ETH_TYPE, EthType.IPv4)
+            .setExact(MatchField.IPV4_SRC, IPv4Address.of(0xc0a8037f))
+            .setExact(MatchField.IPV4_DST, IPv4Address.of(0xffffffff))
+            .setExact(MatchField.ETH_SRC, MacAddress.of("01:23:45:67:89:ab"))
+            .setExact(MatchField.ETH_DST, MacAddress.of("cd:ef:01:23:45:67"))
+            .build()
+    )
+    .setIdleTimeout(5)
+    .setFlags(Sets.immutableEnumSet(OFFlowModFlags.CHECK_OVERLAP))
+    .setBufferId(OFBufferId.of(0))
+    .setOutPort(OFPort.of(0)) // doesn't make that much sense, but is in the example
+    .setActions(
+        ImmutableList.of(
+            factory.actions().output(OFPort.FLOOD, 0),
+            factory.actions().niciraDecTtl(),
+            factory.actions().bsnSetTunnelDst(0)
+        )
+    );
diff --git a/test_data/of10/flow_stats_entry.data b/test_data/of10/flow_stats_entry.data
new file mode 100644
index 0000000..b9a8dc1
--- /dev/null
+++ b/test_data/of10/flow_stats_entry.data
@@ -0,0 +1,137 @@
+-- binary
+00 68 # length
+03 # table_id
+00 # pad
+
+#### ofp_match_v1
+00 30 00 e2 # wildcards=(OFPFW_DL_VLAN|OFPFW_NW_PROTO|OFPFW_TP_SRC|OFPFW_TP_DST|OFPFW_DL_VLAN_PCP|OFPFW_NW_TOS)
+00 03 # in_port
+01 23 45 67 89 ab # eth_src
+cd ef 01 23 45 67 # eth_dst
+00 00 # dl_vlan
+00 00 # dl_pcp, pad
+08 00 # dl_type
+00 00 00 00 # nw_tos, nw_proto, pad[2]
+c0 a8 03 7f # nw_src
+ff ff ff ff # nw_dst
+00 00 00 00 # tcp_src, tcp_dst
+
+00 00 00 01 # duration_sec
+00 00 00 02 # duration_nsec
+00 64 # priority
+00 05 # idle_timeout
+00 0a # hard_timeout
+00 00 00 00 00 00 # pad
+01 23 45 67 89 ab cd ef # cookie
+00 00 00 00 00 00 00 0a # packet_count
+00 00 00 00 00 00 03 e8 # byte_count
+00 00 # actions[0].type
+00 08 # actions[0].len
+00 01 # actions[0].port
+00 00 # actions[0].max_len
+00 00 # actions[1].type
+00 08 # actions[1].len
+00 02 # actions[1].port
+00 00 # actions[1].max_len
+-- python
+ofp.flow_stats_entry(
+    table_id=3,
+    match=ofp.match(
+        wildcards=ofp.OFPFW_DL_VLAN|ofp.OFPFW_NW_PROTO|ofp.OFPFW_TP_SRC|ofp.OFPFW_TP_DST|ofp.OFPFW_DL_VLAN_PCP|ofp.OFPFW_NW_TOS,
+        in_port=3,
+        eth_type=0x800,
+        ipv4_src=0xc0a8037f,
+        ipv4_dst=0xffffffff,
+        eth_src=[0x01, 0x23, 0x45, 0x67, 0x89, 0xab],
+        eth_dst=[0xcd, 0xef, 0x01, 0x23, 0x45, 0x67]),
+    duration_sec=1,
+    duration_nsec=2,
+    priority=100,
+    idle_timeout=5,
+    hard_timeout=10,
+    cookie=0x0123456789abcdef,
+    packet_count=10,
+    byte_count=1000,
+    actions=[
+        ofp.action.output(port=1),
+        ofp.action.output(port=2)])
+-- c
+obj = of_flow_stats_entry_new(OF_VERSION_1_0);
+{
+    of_object_t list;
+    of_flow_stats_entry_actions_bind(obj, &list);
+    {
+        of_object_t *obj = of_action_output_new(OF_VERSION_1_0);
+        of_action_output_max_len_set(obj, 0);
+        of_action_output_port_set(obj, 1);
+        of_list_append(&list, obj);
+        of_object_delete(obj);
+    }
+    {
+        of_object_t *obj = of_action_output_new(OF_VERSION_1_0);
+        of_action_output_max_len_set(obj, 0);
+        of_action_output_port_set(obj, 2);
+        of_list_append(&list, obj);
+        of_object_delete(obj);
+    }
+}
+
+of_flow_stats_entry_byte_count_set(obj, 1000);
+of_flow_stats_entry_cookie_set(obj, 81985529216486895);
+of_flow_stats_entry_duration_nsec_set(obj, 2);
+of_flow_stats_entry_duration_sec_set(obj, 1);
+of_flow_stats_entry_hard_timeout_set(obj, 10);
+of_flow_stats_entry_idle_timeout_set(obj, 5);
+{
+    of_match_t match = { OF_VERSION_1_0 };
+    match.fields.in_port = 3;
+    match.fields.eth_src = (of_mac_addr_t) { { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab } };
+    match.fields.eth_dst = (of_mac_addr_t) { { 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67 } };
+    match.fields.eth_type = 0x800;
+    match.fields.ipv4_src = 0xc0a8037f;
+    match.fields.ipv4_dst = 0xffffffff;
+    OF_MATCH_MASK_IN_PORT_EXACT_SET(&match);
+    OF_MATCH_MASK_ETH_SRC_EXACT_SET(&match);
+    OF_MATCH_MASK_ETH_DST_EXACT_SET(&match);
+    OF_MATCH_MASK_ETH_TYPE_EXACT_SET(&match);
+    //OF_MATCH_MASK_VLAN_VID_EXACT_SET(&match);
+    //OF_MATCH_MASK_VLAN_PCP_EXACT_SET(&match);
+    OF_MATCH_MASK_ETH_TYPE_EXACT_SET(&match);
+    //OF_MATCH_MASK_IP_DSCP_EXACT_SET(&match);
+    //OF_MATCH_MASK_IP_PROTO_EXACT_SET(&match);
+    OF_MATCH_MASK_IPV4_SRC_EXACT_SET(&match);
+    OF_MATCH_MASK_IPV4_DST_EXACT_SET(&match);
+    //OF_MATCH_MASK_TCP_SRC_EXACT_SET(&match);
+    //OF_MATCH_MASK_TCP_DST_EXACT_SET(&match);
+    of_flow_stats_entry_match_set(obj, &match);
+}
+of_flow_stats_entry_packet_count_set(obj, 10);
+of_flow_stats_entry_priority_set(obj, 100);
+of_flow_stats_entry_table_id_set(obj, 3);
+-- java
+    builder
+      .setTableId(TableId.of(3))
+      .setMatch(
+        factory.buildMatch()
+            .setExact(MatchField.IN_PORT, OFPort.of(3))
+            .setExact(MatchField.ETH_TYPE, EthType.IPv4)
+            .setExact(MatchField.IPV4_SRC, IPv4Address.of(0xc0a8037f))
+            .setExact(MatchField.IPV4_DST, IPv4Address.of(0xffffffff))
+            .setExact(MatchField.ETH_SRC, MacAddress.of("01:23:45:67:89:ab"))
+            .setExact(MatchField.ETH_DST, MacAddress.of("cd:ef:01:23:45:67"))
+            .build()
+      )
+      .setDurationSec(1)
+      .setDurationNsec(2)
+      .setPriority(100)
+      .setIdleTimeout(5)
+      .setHardTimeout(10)
+      .setCookie(U64.of(0x0123456789abcdefL))
+      .setPacketCount(U64.of(10))
+      .setByteCount(U64.of(1000))
+      .setActions(
+            ImmutableList.<OFAction>of(
+                   factory.actions().output(OFPort.of(1), 0),
+                   factory.actions().output(OFPort.of(2), 0)
+                   )
+      );
diff --git a/test_data/of10/flow_stats_reply.data b/test_data/of10/flow_stats_reply.data
new file mode 100644
index 0000000..185235b
--- /dev/null
+++ b/test_data/of10/flow_stats_reply.data
@@ -0,0 +1,182 @@
+-- binary
+01 11 # version/type
+00 e4 # length
+00 00 00 06 # xid
+00 01 # stats_type
+00 00 # flags
+00 68 # entries[0].length
+03 # entries[0].table_id
+00 # entries[0].pad
+00 3f ff ff # entries[0].match.wildcards
+00 00 00 00 # remaining match fields
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 01 # entries[0].duration_sec
+00 00 00 02 # entries[0].duration_nsec
+00 64 # entries[0].priority
+00 05 # entries[0].idle_timeout
+00 0a # entries[0].hard_timeout
+00 00 00 00 00 00 # pad
+01 23 45 67 89 ab cd ef # entries[0].cookie
+00 00 00 00 00 00 00 0a # entries[0].packet_count
+00 00 00 00 00 00 03 e8 # entries[0].byte_count
+00 00 # entries[0].actions[0].type
+00 08 # entries[0].actions[0].len
+00 01 # entries[0].actions[0].port
+00 00 # entries[0].actions[0].max_len
+00 00 # entries[0].actions[1].type
+00 08 # entries[0].actions[1].len
+00 02 # entries[0].actions[1].port
+00 00 # entries[0].actions[1].max_len
+00 70 # entries[1].length
+04 # entries[1].table_id
+00 # entries[1].pad
+00 3f ff ff # entries[1].match.wildcards
+00 00 00 00 # remaining match fields
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 00 00 00 00 00 # ...
+00 00 00 01 # entries[1].duration_sec
+00 00 00 02 # entries[1].duration_nsec
+00 64 # entries[1].priority
+00 05 # entries[1].idle_timeout
+00 0a # entries[1].hard_timeout
+00 00 00 00 00 00 # pad
+01 23 45 67 89 ab cd ef # entries[1].cookie
+00 00 00 00 00 00 00 0a # entries[1].packet_count
+00 00 00 00 00 00 03 e8 # entries[1].byte_count
+00 00 # entries[1].actions[0].type
+00 08 # entries[1].actions[0].len
+00 01 # entries[1].actions[0].port
+00 00 # entries[1].actions[0].max_len
+00 00 # entries[1].actions[1].type
+00 08 # entries[1].actions[1].len
+00 02 # entries[1].actions[1].port
+00 00 # entries[1].actions[1].max_len
+00 00 # entries[1].actions[2].type
+00 08 # entries[1].actions[2].len
+00 03 # entries[1].actions[2].port
+00 00 # entries[1].actions[2].max_len
+-- python
+ofp.message.flow_stats_reply(
+    xid=6,
+    flags=0,
+    entries=[
+        ofp.flow_stats_entry(table_id=3,
+            match=ofp.match(),
+            duration_sec=1,
+            duration_nsec=2,
+            priority=100,
+            idle_timeout=5,
+            hard_timeout=10,
+            cookie=0x0123456789abcdef,
+            packet_count=10,
+            byte_count=1000,
+            actions=[ofp.action.output(port=1),
+            ofp.action.output(port=2)]),
+        ofp.flow_stats_entry(table_id=4,
+            match=ofp.match(),
+            duration_sec=1,
+            duration_nsec=2,
+            priority=100,
+            idle_timeout=5,
+            hard_timeout=10,
+            cookie=0x0123456789abcdef,
+            packet_count=10,
+            byte_count=1000,
+            actions=[ofp.action.output(port=1),
+            ofp.action.output(port=2),
+            ofp.action.output(port=3)])])
+-- c
+obj = of_flow_stats_reply_new(OF_VERSION_1_0);
+of_flow_stats_reply_flags_set(obj, 0);
+of_flow_stats_reply_xid_set(obj, 6);
+{
+    of_object_t *entries = of_list_flow_stats_entry_new(OF_VERSION_1_0);
+    {
+        of_object_t *elem = of_flow_stats_entry_new(OF_VERSION_1_0);
+        of_flow_stats_entry_byte_count_set(elem, 1000);
+        of_flow_stats_entry_cookie_set(elem, 81985529216486895);
+        of_flow_stats_entry_duration_nsec_set(elem, 2);
+        of_flow_stats_entry_duration_sec_set(elem, 1);
+        of_flow_stats_entry_hard_timeout_set(elem, 10);
+        of_flow_stats_entry_idle_timeout_set(elem, 5);
+        of_flow_stats_entry_packet_count_set(elem, 10);
+        of_flow_stats_entry_priority_set(elem, 100);
+        of_flow_stats_entry_table_id_set(elem, 3);
+        {
+            of_match_t match = { OF_VERSION_1_0 };
+            of_flow_stats_entry_match_set(elem, &match);
+        }
+        {
+            of_object_t *actions = of_list_action_new(OF_VERSION_1_0);
+            {
+                of_object_t *elem = of_action_output_new(OF_VERSION_1_0);
+                of_action_output_max_len_set(elem, 0);
+                of_action_output_port_set(elem, 1);
+                of_list_append(actions, elem);
+                of_object_delete(elem);
+            }
+            {
+                of_object_t *elem = of_action_output_new(OF_VERSION_1_0);
+                of_action_output_max_len_set(elem, 0);
+                of_action_output_port_set(elem, 2);
+                of_list_append(actions, elem);
+                of_object_delete(elem);
+            }
+            of_flow_stats_entry_actions_set(elem, actions);
+            of_object_delete(actions);
+        }
+        of_list_append(entries, elem);
+        of_object_delete(elem);
+    }
+    {
+        of_object_t *elem = of_flow_stats_entry_new(OF_VERSION_1_0);
+        of_flow_stats_entry_byte_count_set(elem, 1000);
+        of_flow_stats_entry_cookie_set(elem, 81985529216486895);
+        of_flow_stats_entry_duration_nsec_set(elem, 2);
+        of_flow_stats_entry_duration_sec_set(elem, 1);
+        of_flow_stats_entry_hard_timeout_set(elem, 10);
+        of_flow_stats_entry_idle_timeout_set(elem, 5);
+        of_flow_stats_entry_packet_count_set(elem, 10);
+        of_flow_stats_entry_priority_set(elem, 100);
+        of_flow_stats_entry_table_id_set(elem, 4);
+        {
+            of_match_t match = { OF_VERSION_1_0 };
+            of_flow_stats_entry_match_set(elem, &match);
+        }
+        {
+            of_object_t *actions = of_list_action_new(OF_VERSION_1_0);
+            {
+                of_object_t *elem = of_action_output_new(OF_VERSION_1_0);
+                of_action_output_max_len_set(elem, 0);
+                of_action_output_port_set(elem, 1);
+                of_list_append(actions, elem);
+                of_object_delete(elem);
+            }
+            {
+                of_object_t *elem = of_action_output_new(OF_VERSION_1_0);
+                of_action_output_max_len_set(elem, 0);
+                of_action_output_port_set(elem, 2);
+                of_list_append(actions, elem);
+                of_object_delete(elem);
+            }
+            {
+                of_object_t *elem = of_action_output_new(OF_VERSION_1_0);
+                of_action_output_max_len_set(elem, 0);
+                of_action_output_port_set(elem, 3);
+                of_list_append(actions, elem);
+                of_object_delete(elem);
+            }
+            of_flow_stats_entry_actions_set(elem, actions);
+            of_object_delete(actions);
+        }
+        of_list_append(entries, elem);
+        of_object_delete(elem);
+    }
+    of_flow_stats_reply_entries_set(obj, entries);
+    of_object_delete(entries);
+}
diff --git a/test_data/of10/hello.data b/test_data/of10/hello.data
new file mode 100644
index 0000000..d29dff9
--- /dev/null
+++ b/test_data/of10/hello.data
@@ -0,0 +1,11 @@
+-- binary
+01 00 # version / type
+00 08 # length
+12 34 56 78 # xid
+-- python
+ofp.message.hello(xid=0x12345678)
+-- c
+obj = of_hello_new(OF_VERSION_1_0);
+of_hello_xid_set(obj, 305419896);
+-- java
+builder.setXid(0x12345678)
diff --git a/test_data/of10/packet_in.data b/test_data/of10/packet_in.data
new file mode 100644
index 0000000..d5ccf32
--- /dev/null
+++ b/test_data/of10/packet_in.data
@@ -0,0 +1,37 @@
+-- binary
+01 0a # version / type
+00 15 # length
+12 34 56 78 # xid
+ab cd ef 01 # buffer_id
+00 09 # total_len
+ff fe # in_port
+01 # reason
+00 # pad
+61 62 63 # data
+-- python
+ofp.message.packet_in(
+    xid=0x12345678,
+    buffer_id=0xabcdef01,
+    total_len=9,
+    in_port=ofp.OFPP_LOCAL,
+    reason=ofp.OFPR_ACTION,
+    data='abc')
+-- c
+obj = of_packet_in_new(OF_VERSION_1_0);
+of_packet_in_buffer_id_set(obj, 0xabcdef01);
+{
+    of_octets_t data = { .bytes=3, .data=(uint8_t *)"\x61\x62\x63" };
+    of_packet_in_data_set(obj, &data);
+}
+of_packet_in_in_port_set(obj, 65534);
+of_packet_in_reason_set(obj, 1);
+of_packet_in_total_len_set(obj, 9);
+of_packet_in_xid_set(obj, 305419896);
+-- java
+builder
+   .setXid(0x12345678)
+   .setBufferId(OFBufferId.of(0xabcdef01))
+   .setTotalLen(9)
+   .setInPort(OFPort.LOCAL)
+   .setReason(OFPacketInReason.ACTION)
+   .setData(new byte[] { 0x61, 0x62, 0x63 } );
diff --git a/test_data/of10/packet_out.data b/test_data/of10/packet_out.data
new file mode 100644
index 0000000..a3c642b
--- /dev/null
+++ b/test_data/of10/packet_out.data
@@ -0,0 +1,53 @@
+-- binary
+01 0d # version/type
+00 23 # length
+12 34 56 78 # xid
+ab cd ef 01 # buffer_id
+ff fe # in_port
+00 10 # actions_len
+00 00 # actions[0].type
+00 08 # actions[0].len
+00 01 # actions[0].port
+00 00 # actions[0].max_len
+00 00 # actions[1].type
+00 08 # actions[1].len
+00 02 # actions[1].port
+00 00 # actions[1].max_len
+61 62 63 # data
+-- python
+ofp.message.packet_out(
+    xid=0x12345678,
+    buffer_id=0xabcdef01,
+    in_port=ofp.OFPP_LOCAL,
+    actions=[
+        ofp.action.output(port=1),
+        ofp.action.output(port=2)],
+    data='abc')
+-- c
+obj = of_packet_out_new(OF_VERSION_1_0);
+of_packet_out_buffer_id_set(obj, 0xabcdef01);
+of_packet_out_in_port_set(obj, 65534);
+of_packet_out_xid_set(obj, 305419896);
+{
+    of_object_t *list = of_list_action_new(OF_VERSION_1_0);
+    {
+        of_object_t *obj = of_action_output_new(OF_VERSION_1_0);
+        of_action_output_max_len_set(obj, 0);
+        of_action_output_port_set(obj, 1);
+        of_list_append(list, obj);
+        of_object_delete(obj);
+    }
+    {
+        of_object_t *obj = of_action_output_new(OF_VERSION_1_0);
+        of_action_output_max_len_set(obj, 0);
+        of_action_output_port_set(obj, 2);
+        of_list_append(list, obj);
+        of_object_delete(obj);
+    }
+    of_packet_out_actions_set(obj, list);
+    of_object_delete(list);
+}
+{
+    of_octets_t data = { .bytes=3, .data=(uint8_t *)"\x61\x62\x63" };
+    of_packet_out_data_set(obj, &data);
+}
diff --git a/test_data/of10/port_desc.data b/test_data/of10/port_desc.data
new file mode 100644
index 0000000..56242cc
--- /dev/null
+++ b/test_data/of10/port_desc.data
@@ -0,0 +1,38 @@
+-- binary
+ff fd # port_no
+01 02 03 04 05 06 # hw_addr
+66 6f 6f 00 00 00 00 00 00 00 00 00 00 00 00 00 # name
+00 00 00 10 # config
+00 00 02 00 # state
+00 00 00 01 # curr
+00 00 00 20 # advertised
+00 00 02 00 # supported
+00 00 08 00 # peer
+-- python
+ofp.port_desc(
+    port_no=ofp.OFPP_CONTROLLER,
+    hw_addr=[1,2,3,4,5,6],
+    name="foo",
+    config=ofp.OFPPC_NO_FLOOD,
+    state=ofp.OFPPS_STP_FORWARD,
+    curr=ofp.OFPPF_10MB_HD,
+    advertised=ofp.OFPPF_1GB_FD,
+    supported=ofp.OFPPF_AUTONEG,
+    peer=ofp.OFPPF_PAUSE_ASYM)
+-- c
+obj = of_port_desc_new(OF_VERSION_1_0);
+of_port_desc_advertised_set(obj, 32);
+of_port_desc_config_set(obj, 16);
+of_port_desc_curr_set(obj, 1);
+{
+    of_mac_addr_t hw_addr = { { 1, 2, 3, 4, 5, 6 } };
+    of_port_desc_hw_addr_set(obj, hw_addr);
+}
+{
+    of_port_name_t name = "foo";
+    of_port_desc_name_set(obj, name);
+}
+of_port_desc_peer_set(obj, 2048);
+of_port_desc_port_no_set(obj, 65533);
+of_port_desc_state_set(obj, 512);
+of_port_desc_supported_set(obj, 512);
diff --git a/test_data/of10/port_mod.data b/test_data/of10/port_mod.data
new file mode 100644
index 0000000..754e139
--- /dev/null
+++ b/test_data/of10/port_mod.data
@@ -0,0 +1,29 @@
+-- binary
+01 0f # version/type
+00 20 # length
+00 00 00 02 # xid
+ff fd # port_no
+01 02 03 04 05 06 # hw_addr
+90 ab cd ef # config
+ff 11 ff 11 # mask
+ca fe 67 89 # advertise
+00 00 00 00 # pad
+-- python
+ofp.message.port_mod(
+    xid=2,
+    port_no=ofp.OFPP_CONTROLLER,
+    hw_addr=[1,2,3,4,5,6],
+    config=0x90ABCDEF,
+    mask=0xFF11FF11,
+    advertise=0xCAFE6789)
+-- c
+obj = of_port_mod_new(OF_VERSION_1_0);
+of_port_mod_advertise_set(obj, 0xCAFE6789);
+of_port_mod_config_set(obj, 0x90ABCDEF);
+{
+    of_mac_addr_t hw_addr = { { 1, 2, 3, 4, 5, 6 } };
+    of_port_mod_hw_addr_set(obj, hw_addr);
+}
+of_port_mod_mask_set(obj, 0xFF11FF11);
+of_port_mod_port_no_set(obj, 65533);
+of_port_mod_xid_set(obj, 2);
diff --git a/test_data/of10/port_stats_reply.data b/test_data/of10/port_stats_reply.data
new file mode 100644
index 0000000..4378387
--- /dev/null
+++ b/test_data/of10/port_stats_reply.data
@@ -0,0 +1,83 @@
+-- binary
+01 11 # version/type
+00 dc # length
+00 00 00 05 # xid
+00 04 # stats_type
+00 00 # flags
+00 01 # entries[0].port_no
+00 00 00 00 00 00 # entries[0].pad
+00 00 00 00 00 00 00 38 # entries[0].rx_packets
+00 00 00 00 00 00 00 00 # entries[0].tx_packets
+00 00 00 00 00 00 00 00 # entries[0].rx_bytes
+00 00 00 00 00 00 00 00 # entries[0].tx_bytes
+00 00 00 00 00 00 00 00 # entries[0].rx_dropped
+00 00 00 00 00 00 00 00 # entries[0].tx_dropped
+00 00 00 00 00 00 00 00 # entries[0].rx_errors
+00 00 00 00 00 00 00 00 # entries[0].tx_errors
+00 00 00 00 00 00 00 00 # entries[0].rx_frame_err
+00 00 00 00 00 00 00 00 # entries[0].rx_over_err
+00 00 00 00 00 00 00 00 # entries[0].rx_crc_err
+00 00 00 00 00 00 00 05 # entries[0].collisions
+ff fe # entries[1].port_no
+00 00 00 00 00 00 # entries[1].pad
+00 00 00 00 00 00 00 01 # entries[1].rx_packets
+00 00 00 00 00 00 00 00 # entries[1].tx_packets
+00 00 00 00 00 00 00 00 # entries[1].rx_bytes
+00 00 00 00 00 00 00 00 # entries[1].tx_bytes
+00 00 00 00 00 00 00 00 # entries[1].rx_dropped
+00 00 00 00 00 00 00 00 # entries[1].tx_dropped
+00 00 00 00 00 00 00 00 # entries[1].rx_errors
+00 00 00 00 00 00 00 00 # entries[1].tx_errors
+00 00 00 00 00 00 00 00 # entries[1].rx_frame_err
+00 00 00 00 00 00 00 00 # entries[1].rx_over_err
+00 00 00 00 00 00 00 00 # entries[1].rx_crc_err
+00 00 00 00 00 00 00 01 # entries[1].collisions
+-- python
+ofp.message.port_stats_reply(
+    xid=5, flags=0, entries=[
+        ofp.port_stats_entry(port_no=1, rx_packets=56, collisions=5),
+        ofp.port_stats_entry(port_no=ofp.OFPP_LOCAL, rx_packets=1, collisions=1)])
+-- c
+obj = of_port_stats_reply_new(OF_VERSION_1_0);
+{
+    of_object_t list;
+    of_port_stats_reply_entries_bind(obj, &list);
+    {
+        of_object_t *obj = of_port_stats_entry_new(OF_VERSION_1_0);
+        of_port_stats_entry_collisions_set(obj, 5);
+        of_port_stats_entry_port_no_set(obj, 1);
+        of_port_stats_entry_rx_bytes_set(obj, 0);
+        of_port_stats_entry_rx_crc_err_set(obj, 0);
+        of_port_stats_entry_rx_dropped_set(obj, 0);
+        of_port_stats_entry_rx_errors_set(obj, 0);
+        of_port_stats_entry_rx_frame_err_set(obj, 0);
+        of_port_stats_entry_rx_over_err_set(obj, 0);
+        of_port_stats_entry_rx_packets_set(obj, 56);
+        of_port_stats_entry_tx_bytes_set(obj, 0);
+        of_port_stats_entry_tx_dropped_set(obj, 0);
+        of_port_stats_entry_tx_errors_set(obj, 0);
+        of_port_stats_entry_tx_packets_set(obj, 0);
+        of_list_append(&list, obj);
+        of_object_delete(obj);
+    }
+    {
+        of_object_t *obj = of_port_stats_entry_new(OF_VERSION_1_0);
+        of_port_stats_entry_collisions_set(obj, 1);
+        of_port_stats_entry_port_no_set(obj, 65534);
+        of_port_stats_entry_rx_bytes_set(obj, 0);
+        of_port_stats_entry_rx_crc_err_set(obj, 0);
+        of_port_stats_entry_rx_dropped_set(obj, 0);
+        of_port_stats_entry_rx_errors_set(obj, 0);
+        of_port_stats_entry_rx_frame_err_set(obj, 0);
+        of_port_stats_entry_rx_over_err_set(obj, 0);
+        of_port_stats_entry_rx_packets_set(obj, 1);
+        of_port_stats_entry_tx_bytes_set(obj, 0);
+        of_port_stats_entry_tx_dropped_set(obj, 0);
+        of_port_stats_entry_tx_errors_set(obj, 0);
+        of_port_stats_entry_tx_packets_set(obj, 0);
+        of_list_append(&list, obj);
+        of_object_delete(obj);
+    }
+}
+of_port_stats_reply_flags_set(obj, 0);
+of_port_stats_reply_xid_set(obj, 5);
diff --git a/test_data/of10/port_status.data b/test_data/of10/port_status.data
new file mode 100644
index 0000000..7589eae
--- /dev/null
+++ b/test_data/of10/port_status.data
@@ -0,0 +1,54 @@
+-- binary
+01 0c # version / type
+00 40 # length
+00 00 00 04 # xid
+01 # reason
+00 00 00 00 00 00 00 # pad
+ff fd # desc.port_no
+01 02 03 04 05 06 # desc.hw_addr
+66 6f 6f 00 00 00 00 00 # desc.name
+00 00 00 00 00 00 00 00 # ...
+00 00 00 10 # desc.config
+00 00 02 00 # desc.state
+00 00 00 01 # desc.curr
+00 00 00 20 # desc.advertised
+00 00 02 00 # desc.supported
+00 00 08 00 # desc.peer
+-- python
+ofp.message.port_status(
+    xid=4,
+    reason=ofp.OFPPR_DELETE,
+    desc=ofp.port_desc(
+        port_no=ofp.OFPP_CONTROLLER,
+        hw_addr=[1,2,3,4,5,6],
+        name="foo",
+        config=ofp.OFPPC_NO_FLOOD,
+        state=ofp.OFPPS_STP_FORWARD,
+        curr=ofp.OFPPF_10MB_HD,
+        advertised=ofp.OFPPF_1GB_FD,
+        supported=ofp.OFPPF_AUTONEG,
+        peer=ofp.OFPPF_PAUSE_ASYM))
+-- c
+obj = of_port_status_new(OF_VERSION_1_0);
+{
+    of_object_t *desc = of_port_desc_new(OF_VERSION_1_0);
+    of_port_desc_advertised_set(desc, 32);
+    of_port_desc_config_set(desc, 16);
+    of_port_desc_curr_set(desc, 1);
+    {
+        of_mac_addr_t hw_addr = { { 1, 2, 3, 4, 5, 6 } };
+        of_port_desc_hw_addr_set(desc, hw_addr);
+    }
+    {
+        of_port_name_t name = "foo";
+        of_port_desc_name_set(desc, name);
+    }
+    of_port_desc_peer_set(desc, 2048);
+    of_port_desc_port_no_set(desc, 65533);
+    of_port_desc_state_set(desc, 512);
+    of_port_desc_supported_set(desc, 512);
+    of_port_status_desc_set(obj, desc);
+    of_object_delete(desc);
+}
+of_port_status_reason_set(obj, 1);
+of_port_status_xid_set(obj, 4);
diff --git a/test_data/of10/queue_get_config_reply.data b/test_data/of10/queue_get_config_reply.data
new file mode 100644
index 0000000..1016cad
--- /dev/null
+++ b/test_data/of10/queue_get_config_reply.data
@@ -0,0 +1,83 @@
+-- binary
+01 15 # version / type
+00 50 # length
+12 34 56 78 # xid
+ff fe # port
+00 00 00 00 00 00 # pad
+00 00 00 01 # queues[0].queue_id
+00 18 # queues[0].len
+00 00 # queues[0].pad
+00 01 # queues[0].properties[0].type
+00 10 # queues[0].properties[0].length
+00 00 00 00 # queues[0].properties[0].pad
+00 05 # queues[0].properties[0].rate
+00 00 00 00 00 00 # queues[0].properties[0].pad2
+00 00 00 02 # queues[1].queue_id
+00 28 # queues[1].len
+00 00 # queues[1].pad
+00 01 # queues[1].properties[0].type
+00 10 # queues[1].properties[0].length
+00 00 00 00 # queues[1].properties[0].pad
+00 06 # queues[1].properties[0].rate
+00 00 00 00 00 00 # queues[1].properties[0].pad2
+00 01 # queues[1].properties[1].type
+00 10 # queues[1].properties[1].length
+00 00 00 00 # queues[1].properties[1].pad
+00 07 # queues[1].properties[1].rate
+00 00 00 00 00 00 # queues[1].properties[1].pad2
+-- python
+ofp.message.queue_get_config_reply(
+    xid=0x12345678,
+    port=ofp.OFPP_LOCAL,
+    queues=[
+        ofp.packet_queue(queue_id=1, properties=[
+            ofp.queue_prop_min_rate(rate=5)]),
+        ofp.packet_queue(queue_id=2, properties=[
+            ofp.queue_prop_min_rate(rate=6),
+            ofp.queue_prop_min_rate(rate=7)])])
+-- c
+obj = of_queue_get_config_reply_new(OF_VERSION_1_0);
+of_queue_get_config_reply_port_set(obj, 65534);
+{
+    of_object_t list;
+    of_queue_get_config_reply_queues_bind(obj, &list);
+    {
+        of_object_t *obj = of_packet_queue_new(OF_VERSION_1_0);
+        {
+            of_object_t list;
+            of_packet_queue_properties_bind(obj, &list);
+            {
+                of_object_t *obj = of_queue_prop_min_rate_new(OF_VERSION_1_0);
+                of_queue_prop_min_rate_rate_set(obj, 5);
+                of_list_append(&list, obj);
+                of_object_delete(obj);
+            }
+        }
+        of_packet_queue_queue_id_set(obj, 1);
+        of_list_append(&list, obj);
+        of_object_delete(obj);
+    }
+    {
+        of_object_t *obj = of_packet_queue_new(OF_VERSION_1_0);
+        {
+            of_object_t list;
+            of_packet_queue_properties_bind(obj, &list);
+            {
+                of_object_t *obj = of_queue_prop_min_rate_new(OF_VERSION_1_0);
+                of_queue_prop_min_rate_rate_set(obj, 6);
+                of_list_append(&list, obj);
+                of_object_delete(obj);
+            }
+            {
+                of_object_t *obj = of_queue_prop_min_rate_new(OF_VERSION_1_0);
+                of_queue_prop_min_rate_rate_set(obj, 7);
+                of_list_append(&list, obj);
+                of_object_delete(obj);
+            }
+        }
+        of_packet_queue_queue_id_set(obj, 2);
+        of_list_append(&list, obj);
+        of_object_delete(obj);
+    }
+}
+of_queue_get_config_reply_xid_set(obj, 305419896);
diff --git a/test_data/of10/table_stats_entry.data b/test_data/of10/table_stats_entry.data
new file mode 100644
index 0000000..017e8ba
--- /dev/null
+++ b/test_data/of10/table_stats_entry.data
@@ -0,0 +1,33 @@
+-- binary
+03 # table_id
+00 00 00 # pad
+66 6f 6f 00 00 00 00 00 # name
+00 00 00 00 00 00 00 00 # name
+00 00 00 00 00 00 00 00 # name
+00 00 00 00 00 00 00 00 # name
+00 3f ff ff # wildcards
+00 00 00 05 # max_entries
+00 00 00 02 # active_count
+00 00 00 ff ff ff ff ff # lookup_count
+81 11 11 11 11 11 11 11 # matched_count
+-- python
+ofp.table_stats_entry(
+    table_id=3,
+    name="foo",
+    wildcards=ofp.OFPFW_ALL,
+    max_entries=5,
+    active_count=2,
+    lookup_count=1099511627775,
+    matched_count=9300233470495232273L)
+-- c
+obj = of_table_stats_entry_new(OF_VERSION_1_0);
+of_table_stats_entry_active_count_set(obj, 2);
+of_table_stats_entry_lookup_count_set(obj, 1099511627775ULL);
+of_table_stats_entry_matched_count_set(obj, 9300233470495232273ULL);
+of_table_stats_entry_max_entries_set(obj, 5);
+{
+    of_table_name_t name = "foo";
+    of_table_stats_entry_name_set(obj, name);
+}
+of_table_stats_entry_table_id_set(obj, 3);
+of_table_stats_entry_wildcards_set(obj, 4194303);
diff --git a/test_data/of12/empty_match.data b/test_data/of12/empty_match.data
new file mode 100644
index 0000000..6e65a47
--- /dev/null
+++ b/test_data/of12/empty_match.data
@@ -0,0 +1,6 @@
+-- binary
+00 01 # type
+00 04 # length
+00 00 00 00 # padding
+-- python
+ofp.match()
diff --git a/test_data/of12/match.data b/test_data/of12/match.data
new file mode 100644
index 0000000..3b1e3c2
--- /dev/null
+++ b/test_data/of12/match.data
@@ -0,0 +1,25 @@
+-- binary
+00 01 # type
+00 3C # length
+80 00 # oxm_list[0].class
+20 02 # oxm_list[0].type_len
+00 35 # oxm_list[0].value
+80 00 # oxm_list[1].class
+05 10 # oxm_list[1].type_len
+FE DC BA 98 76 54 32 10 # oxm_list[1].value
+FF FF FF FF 12 34 56 78 # oxm_list[1].mask
+80 00 # oxm_list[2].class
+08 06 # oxm_list[2].type_len
+01 02 03 04 05 06 # oxm_list[2].value
+80 00 # oxm_list[3].class
+36 10 # oxm_list[3].type_len
+12 12 12 12 12 12 12 12 # oxm_list[3].value
+12 12 12 12 12 12 12 12 # ...
+00 00 00 00 # pad
+-- python
+ofp.match([
+    ofp.oxm.udp_dst(53),
+    ofp.oxm.metadata_masked(0xFEDCBA9876543210, 0xFFFFFFFF12345678),
+    ofp.oxm.eth_src([1,2,3,4,5,6]),
+    ofp.oxm.ipv6_dst("\x12" * 16),
+])
diff --git a/test_data/of12/oxm_in_phy_port.data b/test_data/of12/oxm_in_phy_port.data
new file mode 100644
index 0000000..32ac1ea
--- /dev/null
+++ b/test_data/of12/oxm_in_phy_port.data
@@ -0,0 +1,7 @@
+-- binary
+80 00 # class
+02 # type/masked
+04 # length
+00 00 00 2a # value
+-- python
+ofp.oxm.in_phy_port(value=42)
diff --git a/test_data/of12/oxm_in_phy_port_masked.data b/test_data/of12/oxm_in_phy_port_masked.data
new file mode 100644
index 0000000..99b0ad3
--- /dev/null
+++ b/test_data/of12/oxm_in_phy_port_masked.data
@@ -0,0 +1,8 @@
+-- binary
+80 00 # class
+03 # type/masked
+08 # length
+00 00 00 2a # value
+aa bb cc dd # mask
+-- python
+ofp.oxm.in_phy_port_masked(value=42, value_mask=0xaabbccdd)
diff --git a/test_data/of12/oxm_ipv6_dst.data b/test_data/of12/oxm_ipv6_dst.data
new file mode 100644
index 0000000..23c8fb5
--- /dev/null
+++ b/test_data/of12/oxm_ipv6_dst.data
@@ -0,0 +1,8 @@
+-- binary
+80 00 # class
+36 # type/masked
+10 # length
+00 01 02 03 04 05 06 07 # value
+08 09 0a 0b 0c 0d 0e 0f # ...
+-- python
+ofp.oxm.ipv6_dst('\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f')
diff --git a/test_data/of13/TODO b/test_data/of13/TODO
new file mode 100644
index 0000000..a232b54
--- /dev/null
+++ b/test_data/of13/TODO
@@ -0,0 +1,52 @@
+# This list is not complete.
+
+flow_modify
+flow_modify_strict
+flow_delete
+flow_delete_strict
+
+port_mod
+table_mod
+
+desc_stats_request
+desc_stats_reply
+flow_stats_request
+flow_stats_reply
+aggregate_stats_request
+aggregate_stats_reply
+port_stats_request
+port_stats_reply
+queue_stats_request
+queue_stats_reply
+group_stats_request
+group_desc_stats_request
+group_features_stats_request
+group_features_stats_reply
+meter_stats_request
+meter_config_stats_request
+meter_features_stats_request
+table_features_stats_request
+table_features_stats_reply
+port_desc_stats_request
+port_desc_stats_reply
+
+barrier_request
+barrier_reply
+queue_get_config_request
+queue_get_config_reply
+role_request
+role_reply
+get_async_request
+get_async_reply
+set_async
+meter_mod
+
+# TODO test experimenter messages
+
+instruction_write_metadata
+instruction_write_actions
+instruction_apply_actions
+instruction_clear_actions
+instruction_meter
+
+# TODO test experimenter instructions
diff --git a/test_data/of13/action_output.data b/test_data/of13/action_output.data
new file mode 100644
index 0000000..7653a30
--- /dev/null
+++ b/test_data/of13/action_output.data
@@ -0,0 +1,10 @@
+-- binary
+00 00 # type
+00 10 # length
+00 00 00 32 # port
+ff ff # max_len
+00 00 00 00 00 00 # pad
+-- python
+ofp.action.output(port=50, max_len=65535)
+-- java
+builder.setPort(OFPort.of(50)).setMaxLen(65535)
diff --git a/test_data/of13/action_set_field__bsn_lag_id.data b/test_data/of13/action_set_field__bsn_lag_id.data
new file mode 100644
index 0000000..f1a8aa1
--- /dev/null
+++ b/test_data/of13/action_set_field__bsn_lag_id.data
@@ -0,0 +1,11 @@
+-- binary
+00 19 # type
+00 10 # length
+00 03 02 04 # OXM header
+12 34 56 78 # OXM value
+00 00 00 00 # pad
+-- python
+ofp.action.set_field(field=ofp.oxm.bsn_lag_id(0x12345678))
+-- java
+OFOxms oxms = OFFactories.getFactory(OFVersion.OF_13).oxms();
+builder.setField(oxms.bsnLagId(LagId.of(0x12345678)))
diff --git a/test_data/of13/action_set_field__eth_dst.data b/test_data/of13/action_set_field__eth_dst.data
new file mode 100644
index 0000000..833bb36
--- /dev/null
+++ b/test_data/of13/action_set_field__eth_dst.data
@@ -0,0 +1,11 @@
+-- binary
+00 19 # type
+00 10 # length
+80 00 06 06 # OXM header
+00 01 02 03 04 05 # OXM value
+00 00 # pad
+-- python
+ofp.action.set_field(field=ofp.oxm.eth_dst([0, 1, 2, 3, 4, 5]))
+-- java
+OFOxms oxms = OFFactories.getFactory(OFVersion.OF_13).oxms();
+builder.setField(oxms.ethDst(MacAddress.of("00:01:02:03:04:05")))
diff --git a/test_data/of13/action_set_field__ipv6_src.data b/test_data/of13/action_set_field__ipv6_src.data
new file mode 100644
index 0000000..cfa2738
--- /dev/null
+++ b/test_data/of13/action_set_field__ipv6_src.data
@@ -0,0 +1,10 @@
+-- binary
+00 19 # type
+00 18 # length
+80 00 34 10 # OXM header
+00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f # OXM value
+-- python
+ofp.action.set_field(field=ofp.oxm.ipv6_src("\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"))
+-- java
+OFOxms oxms = OFFactories.getFactory(OFVersion.OF_13).oxms();
+builder.setField(oxms.ipv6Src(IPv6Address.of("0001:0203:0405:0607:0809:0a0b:0c0d:0e0f")))
diff --git a/test_data/of13/action_set_field__tcp_src.data b/test_data/of13/action_set_field__tcp_src.data
new file mode 100644
index 0000000..41c4780
--- /dev/null
+++ b/test_data/of13/action_set_field__tcp_src.data
@@ -0,0 +1,11 @@
+-- binary
+00 19 # type
+00 10 # length
+80 00 1a 02 # OXM header
+00 32 # OXM value
+00 00 00 00 00 00 # pad
+-- python
+ofp.action.set_field(field=ofp.oxm.tcp_src(50))
+-- java
+OFOxms oxms = OFFactories.getFactory(OFVersion.OF_13).oxms();
+builder.setField(oxms.tcpSrc(TransportPort.of(50)))
diff --git a/test_data/of13/bad_match_error_msg.data b/test_data/of13/bad_match_error_msg.data
new file mode 100644
index 0000000..80a925b
--- /dev/null
+++ b/test_data/of13/bad_match_error_msg.data
@@ -0,0 +1,22 @@
+-- binary
+04 01 # version / type
+00 0f # length
+12 34 56 78 # xid
+00 04 # err_type
+00 08 # code
+61 62 63 # data
+-- python
+ofp.message.bad_match_error_msg(
+    xid=0x12345678,
+    code=ofp.OFPBMC_BAD_MASK,
+    data="abc")
+-- c
+obj = of_bad_match_error_msg_new(OF_VERSION_1_3);
+of_bad_match_error_msg_xid_set(obj, 0x12345678);
+of_bad_match_error_msg_code_set(obj, OF_MATCH_FAILED_BAD_MASK_BY_VERSION(OF_VERSION_1_3));
+of_octets_t data = { .bytes=3, .data=(uint8_t *)"\x61\x62\x63" };
+of_bad_match_error_msg_data_set(obj, &data);
+-- java
+builder.setXid(0x12345678)
+    .setCode(OFBadMatchCode.BAD_MASK)
+    .setData(new byte[] { 0x61, 0x62, 0x63 });
diff --git a/test_data/of13/bad_request_error_msg.data b/test_data/of13/bad_request_error_msg.data
new file mode 100644
index 0000000..cc2c139
--- /dev/null
+++ b/test_data/of13/bad_request_error_msg.data
@@ -0,0 +1,22 @@
+-- binary
+04 01 # version / type
+00 0f # length
+12 34 56 78 # xid
+00 01 # err_type
+00 08 # code
+61 62 63 # data
+-- python
+ofp.message.bad_request_error_msg(
+    xid=0x12345678,
+    code=ofp.OFPBRC_BUFFER_UNKNOWN,
+    data="abc")
+-- c
+obj = of_bad_request_error_msg_new(OF_VERSION_1_3);
+of_bad_request_error_msg_xid_set(obj, 0x12345678);
+of_bad_request_error_msg_code_set(obj, OF_REQUEST_FAILED_BUFFER_UNKNOWN_BY_VERSION(OF_VERSION_1_3));
+of_octets_t data = { .bytes=3, .data=(uint8_t *)"\x61\x62\x63" };
+of_bad_request_error_msg_data_set(obj, &data);
+-- java
+builder.setXid(0x12345678)
+    .setCode(OFBadRequestCode.BUFFER_UNKNOWN)
+    .setData(new byte[] { 0x61, 0x62, 0x63 });
diff --git a/test_data/of13/bsn_flow_idle.data b/test_data/of13/bsn_flow_idle.data
new file mode 100644
index 0000000..61b656f
--- /dev/null
+++ b/test_data/of13/bsn_flow_idle.data
@@ -0,0 +1,54 @@
+-- binary
+04 04 # version, type
+00 38 # length
+12 34 56 78 # xid
+00 5c 16 c7 # experimenter
+00 00 00 28 # subtype
+fe dc ba 98 76 54 32 10 # cookie
+42 68 # priority
+14 # table_id
+00 # pad
+00 00 00 00 # pad
+00 01 # match.type
+00 16 # match.length
+80 00 01 08 # match.oxm_list[0].type_len - IN_PORT
+00 00 00 04 # match.oxm_list[0].value
+00 00 00 05 # match.oxm_list[0].mask
+80 00 2A 02 # match.oxm_list[1].type_len - ARP_OP
+00 01 # match.oxm_list[1].value
+00 00 # match.pad
+-- python
+ofp.message.bsn_flow_idle(
+    xid=0x12345678,
+    cookie=0xFEDCBA9876543210,
+    priority=17000,
+    table_id=20,
+    match=ofp.match([
+        ofp.oxm.in_port_masked(value=4, value_mask=5),
+        ofp.oxm.arp_op(value=1),
+    ]))
+-- c
+obj = of_bsn_flow_idle_new(OF_VERSION_1_3);
+of_bsn_flow_idle_xid_set(obj, 0x12345678);
+of_bsn_flow_idle_cookie_set(obj, 0xFEDCBA9876543210);
+of_bsn_flow_idle_priority_set(obj, 17000);
+of_bsn_flow_idle_table_id_set(obj, 20);
+{
+    of_match_t match = { OF_VERSION_1_3 };
+    match.fields.in_port = 4;
+    match.masks.in_port = 5;
+    match.fields.arp_op = 1;
+    OF_MATCH_MASK_ARP_OP_EXACT_SET(&match);
+    of_bsn_flow_idle_match_set(obj, &match);
+}
+-- java
+builder.setXid(0x12345678)
+    .setCookie(U64.parseHex("FEDCBA9876543210"))
+    .setPriority(17000)
+    .setTableId(TableId.of(20))
+    .setMatch(
+        factory.buildMatch()
+            .setMasked(MatchField.IN_PORT, OFPort.of(4), OFPort.of(5))
+            .setExact(MatchField.ARP_OP, ArpOpcode.of(1))
+                .build()
+    );
diff --git a/test_data/of13/bsn_gentable_bucket_stats_reply.data b/test_data/of13/bsn_gentable_bucket_stats_reply.data
new file mode 100644
index 0000000..8e0772f
--- /dev/null
+++ b/test_data/of13/bsn_gentable_bucket_stats_reply.data
@@ -0,0 +1,54 @@
+-- binary
+04 13 # version, type
+00 38 # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 05 # subtype
+88 77 66 55 44 33 22 11 FF EE DD CC BB AA 99 88 # entries[0].checksum
+12 34 23 45 34 56 45 67 56 78 67 89 78 9A 89 AB # entries[1].checksum
+-- python
+ofp.message.bsn_gentable_bucket_stats_reply(
+    xid=0x12345678,
+    entries=[
+        ofp.bsn_gentable_bucket_stats_entry(
+            checksum=0x8877665544332211FFEEDDCCBBAA9988),
+        ofp.bsn_gentable_bucket_stats_entry(
+            checksum=0x123423453456456756786789789A89AB),
+    ])
+-- java
+builder.setXid(0x12345678)
+    .setEntries(
+        ImmutableList.<OFBsnGentableBucketStatsEntry>of(
+            factory.bsnGentableBucketStatsEntry(OFChecksum128.of(0x8877665544332211L, 0xFFEEDDCCBBAA9988L)),
+            factory.bsnGentableBucketStatsEntry(OFChecksum128.of(0x1234234534564567L, 0x56786789789A89ABL))
+        )
+    )
+-- c
+obj = of_bsn_gentable_bucket_stats_reply_new(OF_VERSION_1_3);
+of_bsn_gentable_bucket_stats_reply_xid_set(obj, 0x12345678);
+{
+    of_object_t *list = of_list_bsn_gentable_bucket_stats_entry_new(OF_VERSION_1_3);
+    {
+        of_object_t *entry = of_bsn_gentable_bucket_stats_entry_new(OF_VERSION_1_3);
+        {
+            of_checksum_128_t checksum = { 0x8877665544332211L, 0xFFEEDDCCBBAA9988L };
+            of_bsn_gentable_bucket_stats_entry_checksum_set(entry, checksum);
+        }
+        of_list_append(list, entry);
+        of_object_delete(entry);
+    }
+    {
+        of_object_t *entry = of_bsn_gentable_bucket_stats_entry_new(OF_VERSION_1_3);
+        {
+            of_checksum_128_t checksum = { 0x1234234534564567L, 0x56786789789A89ABL };
+            of_bsn_gentable_bucket_stats_entry_checksum_set(entry, checksum);
+        }
+        of_list_append(list, entry);
+        of_object_delete(entry);
+    }
+    of_bsn_gentable_bucket_stats_reply_entries_set(obj, list);
+    of_object_delete(list);
+}
diff --git a/test_data/of13/bsn_gentable_bucket_stats_request.data b/test_data/of13/bsn_gentable_bucket_stats_request.data
new file mode 100644
index 0000000..b756b54
--- /dev/null
+++ b/test_data/of13/bsn_gentable_bucket_stats_request.data
@@ -0,0 +1,14 @@
+-- binary
+04 12 # version, type
+00 1a # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 05 # subtype
+12 34 # table_id
+-- python
+ofp.message.bsn_gentable_bucket_stats_request(
+    xid=0x12345678,
+    table_id=0x1234)
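+-- c
+/* Hedged sketch only: assumes the generated of_bsn_gentable_bucket_stats_request
+   accessors follow the same pattern as the other gentable messages in this change. */
+obj = of_bsn_gentable_bucket_stats_request_new(OF_VERSION_1_3);
+of_bsn_gentable_bucket_stats_request_xid_set(obj, 0x12345678);
+of_bsn_gentable_bucket_stats_request_table_id_set(obj, 0x1234);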
diff --git a/test_data/of13/bsn_gentable_clear_request.data b/test_data/of13/bsn_gentable_clear_request.data
new file mode 100644
index 0000000..847af3c
--- /dev/null
+++ b/test_data/of13/bsn_gentable_clear_request.data
@@ -0,0 +1,33 @@
+-- binary
+04 04 # version, type
+00 34 # length
+12 34 56 78 # xid
+00 5c 16 c7 # experimenter
+00 00 00 30 # subtype
+00 14 # table_id
+00 00 # pad
+fe dc ba 98 76 54 32 10 ff ee cc bb aa 99 00 00 # checksum
+ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 00 # checksum_mask
+-- python
+ofp.message.bsn_gentable_clear_request(
+    xid=0x12345678,
+    table_id=20,
+    checksum=     0xFEDCBA9876543210FFEECCBBAA990000,
+    checksum_mask=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF0000)
+-- java
+builder.setXid(0x12345678)
+    .setChecksum(OFChecksum128.of(0xFEDCBA9876543210L, 0xFFEECCBBAA990000L))
+    .setChecksumMask(OFChecksum128.of(0xFFFFFFFFFFFFFFFFL, 0xFFFFFFFFFFFF0000L))
+    .setTableId(GenTableId.of(20))
+-- c
+obj = of_bsn_gentable_clear_request_new(OF_VERSION_1_3);
+of_bsn_gentable_clear_request_xid_set(obj, 0x12345678);
+of_bsn_gentable_clear_request_table_id_set(obj, 20);
+{
+    of_checksum_128_t checksum = { 0xFEDCBA9876543210L, 0xFFEECCBBAA990000L };
+    of_bsn_gentable_clear_request_checksum_set(obj, checksum);
+}
+{
+    of_checksum_128_t checksum_mask = { 0xFFFFFFFFFFFFFFFFL, 0xFFFFFFFFFFFF0000L };
+    of_bsn_gentable_clear_request_checksum_mask_set(obj, checksum_mask);
+}
diff --git a/test_data/of13/bsn_gentable_desc_stats_reply.data b/test_data/of13/bsn_gentable_desc_stats_reply.data
new file mode 100644
index 0000000..180a8a1
--- /dev/null
+++ b/test_data/of13/bsn_gentable_desc_stats_reply.data
@@ -0,0 +1,86 @@
+-- binary
+04 13 # version, type
+00 78 # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 04 # subtype
+
+# entries[0]
+00 30 # length
+00 00 # table id
+74 61 62 6c 65 20 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 # name
+00 00 00 20 # buckets_size
+00 00 00 40 # max_entries
+00 00 00 00 # pad
+
+# entries[1]
+00 30 # length
+00 01 # table id
+74 61 62 6c 65 20 31 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e 2e # name
+00 00 00 40 # buckets_size
+00 00 00 80 # max_entries
+00 00 00 00 # pad
+-- python
+ofp.message.bsn_gentable_desc_stats_reply(
+    xid=0x12345678,
+    entries=[
+        ofp.bsn_gentable_desc_stats_entry(
+            table_id=0,
+            name="table 0",
+            buckets_size=32,
+            max_entries=64),
+        ofp.bsn_gentable_desc_stats_entry(
+            table_id=1,
+            name="table 1".ljust(32, '.'),
+            buckets_size=64,
+            max_entries=128),
+    ])
+-- java
+builder.setXid(0x12345678)
+    .setEntries(
+        ImmutableList.<OFBsnGentableDescStatsEntry>of(
+            factory.buildBsnGentableDescStatsEntry()
+                .setTableId(GenTableId.of(0))
+                .setName("table 0")
+                .setBucketsSize(32)
+                .setMaxEntries(64)
+                .build(),
+            factory.buildBsnGentableDescStatsEntry()
+                .setTableId(GenTableId.of(1))
+                .setName("table 1.........................")
+                .setBucketsSize(64)
+                .setMaxEntries(128)
+                .build()
+        )
+    )
+-- c
+obj = of_bsn_gentable_desc_stats_reply_new(OF_VERSION_1_3);
+of_bsn_gentable_desc_stats_reply_xid_set(obj, 0x12345678);
+{
+    of_object_t *list = of_list_bsn_gentable_desc_stats_entry_new(OF_VERSION_1_3);
+    {
+        of_table_name_t name = "table 0";
+        of_object_t *entry = of_bsn_gentable_desc_stats_entry_new(OF_VERSION_1_3);
+        of_bsn_gentable_desc_stats_entry_table_id_set(entry, 0);
+        of_bsn_gentable_desc_stats_entry_name_set(entry, name);
+        of_bsn_gentable_desc_stats_entry_buckets_size_set(entry, 32);
+        of_bsn_gentable_desc_stats_entry_max_entries_set(entry, 64);
+        of_list_append(list, entry);
+        of_object_delete(entry);
+    }
+    {
+        of_table_name_t name = "table 1.........................";
+        of_object_t *entry = of_bsn_gentable_desc_stats_entry_new(OF_VERSION_1_3);
+        of_bsn_gentable_desc_stats_entry_table_id_set(entry, 1);
+        of_bsn_gentable_desc_stats_entry_name_set(entry, name);
+        of_bsn_gentable_desc_stats_entry_buckets_size_set(entry, 64);
+        of_bsn_gentable_desc_stats_entry_max_entries_set(entry, 128);
+        of_list_append(list, entry);
+        of_object_delete(entry);
+    }
+    of_bsn_gentable_desc_stats_reply_entries_set(obj, list);
+    of_object_delete(list);
+}
diff --git a/test_data/of13/bsn_gentable_desc_stats_request.data b/test_data/of13/bsn_gentable_desc_stats_request.data
new file mode 100644
index 0000000..977fe29
--- /dev/null
+++ b/test_data/of13/bsn_gentable_desc_stats_request.data
@@ -0,0 +1,12 @@
+-- binary
+04 12 # version, type
+00 18 # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 04 # subtype
+-- python
+ofp.message.bsn_gentable_desc_stats_request(
+    xid=0x12345678)
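+-- c
+/* Hedged sketch only: assumes the standard generated accessors for this message. */
+obj = of_bsn_gentable_desc_stats_request_new(OF_VERSION_1_3);
+of_bsn_gentable_desc_stats_request_xid_set(obj, 0x12345678);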
diff --git a/test_data/of13/bsn_gentable_entry_add.data b/test_data/of13/bsn_gentable_entry_add.data
new file mode 100644
index 0000000..02c2bb4
--- /dev/null
+++ b/test_data/of13/bsn_gentable_entry_add.data
@@ -0,0 +1,98 @@
+-- binary
+04 04 # version, type
+00 48 # length
+12 34 56 78 # xid
+00 5c 16 c7 # experimenter
+00 00 00 2e # subtype
+00 14 # table_id
+00 12 # key_length
+fe dc ba 98 76 54 32 10 ff ee cc bb aa 99 88 77 # checksum
+
+00 00 # key[0].type
+00 08 # key[0].length
+00 00 00 05 # key[0].value
+
+00 01 # key[1].type
+00 0a # key[1].length
+01 23 45 67 89 ab # key[1].value
+
+00 00 # value[0].type
+00 08 # value[0].length
+00 00 00 06 # value[0].value
+
+00 01 # value[1].type
+00 0a # value[1].length
+ff ee dd cc bb aa # value[1].value
+-- python
+ofp.message.bsn_gentable_entry_add(
+    xid=0x12345678,
+    checksum=0xFEDCBA9876543210FFEECCBBAA998877,
+    table_id=20,
+    key=[
+        ofp.bsn_tlv.port(5),
+        ofp.bsn_tlv.mac([0x01, 0x23, 0x45, 0x67, 0x89, 0xab]),
+    ],
+    value=[
+        ofp.bsn_tlv.port(6),
+        ofp.bsn_tlv.mac([0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa]),
+    ])
+-- java
+builder.setXid(0x12345678)
+    .setChecksum(OFChecksum128.of(0xFEDCBA9876543210L, 0xFFEECCBBAA998877L))
+    .setTableId(GenTableId.of(20))
+    .setKey(
+        ImmutableList.<OFBsnTlv>of(
+            factory.bsnTlvs().port(OFPort.of(5)),
+            factory.bsnTlvs().mac(MacAddress.of("01:23:45:67:89:ab"))
+        )
+    )
+    .setValue(
+        ImmutableList.<OFBsnTlv>of(
+            factory.bsnTlvs().port(OFPort.of(6)),
+            factory.bsnTlvs().mac(MacAddress.of("ff:ee:dd:cc:bb:aa"))
+        )
+    )
+-- c
+obj = of_bsn_gentable_entry_add_new(OF_VERSION_1_3);
+of_bsn_gentable_entry_add_xid_set(obj, 0x12345678);
+of_bsn_gentable_entry_add_table_id_set(obj, 20);
+{
+    of_checksum_128_t checksum = { 0xFEDCBA9876543210L, 0xFFEECCBBAA998877L };
+    of_bsn_gentable_entry_add_checksum_set(obj, checksum);
+}
+{
+    of_object_t *list = of_list_bsn_tlv_new(OF_VERSION_1_3);
+    {
+        of_object_t *tlv = of_bsn_tlv_port_new(OF_VERSION_1_3);
+        of_bsn_tlv_port_value_set(tlv, 5);
+        of_list_append(list, tlv);
+        of_object_delete(tlv);
+    }
+    {
+        of_mac_addr_t mac = { { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab } };
+        of_object_t *tlv = of_bsn_tlv_mac_new(OF_VERSION_1_3);
+        of_bsn_tlv_mac_value_set(tlv, mac);
+        of_list_append(list, tlv);
+        of_object_delete(tlv);
+    }
+    of_bsn_gentable_entry_add_key_set(obj, list);
+    of_object_delete(list);
+}
+{
+    of_object_t *list = of_list_bsn_tlv_new(OF_VERSION_1_3);
+    {
+        of_object_t *tlv = of_bsn_tlv_port_new(OF_VERSION_1_3);
+        of_bsn_tlv_port_value_set(tlv, 6);
+        of_list_append(list, tlv);
+        of_object_delete(tlv);
+    }
+    {
+        of_mac_addr_t mac = { { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa } };
+        of_object_t *tlv = of_bsn_tlv_mac_new(OF_VERSION_1_3);
+        of_bsn_tlv_mac_value_set(tlv, mac);
+        of_list_append(list, tlv);
+        of_object_delete(tlv);
+    }
+    of_bsn_gentable_entry_add_value_set(obj, list);
+    of_object_delete(list);
+}
diff --git a/test_data/of13/bsn_gentable_entry_delete.data b/test_data/of13/bsn_gentable_entry_delete.data
new file mode 100644
index 0000000..58c6c03
--- /dev/null
+++ b/test_data/of13/bsn_gentable_entry_delete.data
@@ -0,0 +1,54 @@
+-- binary
+04 04 # version, type
+00 24 # length
+12 34 56 78 # xid
+00 5c 16 c7 # experimenter
+00 00 00 2f # subtype
+00 14 # table_id
+
+00 00 # key[0].type
+00 08 # key[0].length
+00 00 00 05 # key[0].value
+
+00 01 # key[1].type
+00 0a # key[1].length
+01 23 45 67 89 ab # key[1].value
+-- python
+ofp.message.bsn_gentable_entry_delete(
+    xid=0x12345678,
+    table_id=20,
+    key=[
+        ofp.bsn_tlv.port(5),
+        ofp.bsn_tlv.mac([0x01, 0x23, 0x45, 0x67, 0x89, 0xab]),
+    ])
+-- java
+builder.setXid(0x12345678)
+    .setTableId(GenTableId.of(20))
+    .setKey(
+        ImmutableList.<OFBsnTlv>of(
+            factory.bsnTlvs().port(OFPort.of(5)),
+            factory.bsnTlvs().mac(MacAddress.of("01:23:45:67:89:ab"))
+        )
+    )
+-- c
+obj = of_bsn_gentable_entry_delete_new(OF_VERSION_1_3);
+of_bsn_gentable_entry_delete_xid_set(obj, 0x12345678);
+of_bsn_gentable_entry_delete_table_id_set(obj, 20);
+{
+    of_object_t *list = of_list_bsn_tlv_new(OF_VERSION_1_3);
+    {
+        of_object_t *tlv = of_bsn_tlv_port_new(OF_VERSION_1_3);
+        of_bsn_tlv_port_value_set(tlv, 5);
+        of_list_append(list, tlv);
+        of_object_delete(tlv);
+    }
+    {
+        of_mac_addr_t mac = { { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab } };
+        of_object_t *tlv = of_bsn_tlv_mac_new(OF_VERSION_1_3);
+        of_bsn_tlv_mac_value_set(tlv, mac);
+        of_list_append(list, tlv);
+        of_object_delete(tlv);
+    }
+    of_bsn_gentable_entry_delete_key_set(obj, list);
+    of_object_delete(list);
+}
diff --git a/test_data/of13/bsn_gentable_entry_desc_stats_reply.data b/test_data/of13/bsn_gentable_entry_desc_stats_reply.data
new file mode 100644
index 0000000..4035f4c
--- /dev/null
+++ b/test_data/of13/bsn_gentable_entry_desc_stats_reply.data
@@ -0,0 +1,148 @@
+-- binary
+04 13 # version, type
+00 64 # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 02 # subtype
+
+# entries[0]
+00 26 # length
+00 08 # key_length
+fe dc ba 98 76 54 32 10 ff ee cc bb aa 99 88 00 # checksum
+00 00 # key[0].type
+00 08 # key[0].length
+00 00 00 05 # key[0].value
+00 01 # value[0].type
+00 0a # value[0].length
+ff ee dd cc bb 00 # value[0].value
+
+# entries[1]
+00 26 # length
+00 08 # key_length
+fe dc ba 98 76 54 32 10 ff ee cc bb aa 99 88 01 # checksum
+00 00 # key[0].type
+00 08 # key[0].length
+00 00 00 06 # key[0].value
+00 01 # value[0].type
+00 0a # value[0].length
+ff ee dd cc bb 01 # value[0].value
+-- python
+ofp.message.bsn_gentable_entry_desc_stats_reply(
+    xid=0x12345678,
+    entries=[
+        ofp.bsn_gentable_entry_desc_stats_entry(
+            checksum=0xFEDCBA9876543210FFEECCBBAA998800,
+            key=[
+                ofp.bsn_tlv.port(5),
+            ],
+            value=[
+                ofp.bsn_tlv.mac([0xff, 0xee, 0xdd, 0xcc, 0xbb, 0x00]),
+            ]),
+        ofp.bsn_gentable_entry_desc_stats_entry(
+            checksum=0xFEDCBA9876543210FFEECCBBAA998801,
+            key=[
+                ofp.bsn_tlv.port(6),
+            ],
+            value=[
+                ofp.bsn_tlv.mac([0xff, 0xee, 0xdd, 0xcc, 0xbb, 0x01]),
+            ]),
+    ])
+-- java
+builder.setXid(0x12345678)
+    .setEntries(
+        ImmutableList.<OFBsnGentableEntryDescStatsEntry>of(
+            factory.buildBsnGentableEntryDescStatsEntry()
+                .setChecksum(OFChecksum128.of(0xFEDCBA9876543210L, 0xFFEECCBBAA998800L))
+                .setKey(ImmutableList.<OFBsnTlv>of(
+                    factory.bsnTlvs().port(OFPort.of(5))
+                ))
+                .setValue(ImmutableList.<OFBsnTlv>of(
+                    factory.bsnTlvs().mac(MacAddress.of("ff:ee:dd:cc:bb:00"))
+                ))
+                .build(),
+            factory.buildBsnGentableEntryDescStatsEntry()
+                .setChecksum(OFChecksum128.of(0xFEDCBA9876543210L, 0xFFEECCBBAA998801L))
+                .setKey(ImmutableList.<OFBsnTlv>of(
+                    factory.bsnTlvs().port(OFPort.of(6))
+                ))
+                .setValue(ImmutableList.<OFBsnTlv>of(
+                    factory.bsnTlvs().mac(MacAddress.of("ff:ee:dd:cc:bb:01"))
+                ))
+                .build()
+        )
+    )
+-- c
+obj = of_bsn_gentable_entry_desc_stats_reply_new(OF_VERSION_1_3);
+of_bsn_gentable_entry_desc_stats_reply_xid_set(obj, 0x12345678);
+{
+    of_object_t *list = of_list_bsn_gentable_entry_desc_stats_entry_new(OF_VERSION_1_3);
+    {
+        of_object_t *entry = of_bsn_gentable_entry_desc_stats_entry_new(OF_VERSION_1_3);
+        {
+            of_checksum_128_t checksum = { 0xFEDCBA9876543210L, 0xFFEECCBBAA998800L };
+            of_bsn_gentable_entry_desc_stats_entry_checksum_set(entry, checksum);
+        }
+        {
+            of_object_t *tlvs = of_list_bsn_tlv_new(OF_VERSION_1_3);
+            {
+                of_object_t *tlv = of_bsn_tlv_port_new(OF_VERSION_1_3);
+                of_bsn_tlv_port_value_set(tlv, 5);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            of_bsn_gentable_entry_desc_stats_entry_key_set(entry, tlvs);
+            of_object_delete(tlvs);
+        }
+        {
+            of_object_t *tlvs = of_list_bsn_tlv_new(OF_VERSION_1_3);
+            {
+                of_object_t *tlv = of_bsn_tlv_mac_new(OF_VERSION_1_3);
+                of_mac_addr_t mac = { { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0x00 } };
+                of_bsn_tlv_mac_value_set(tlv, mac);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            of_bsn_gentable_entry_desc_stats_entry_value_set(entry, tlvs);
+            of_object_delete(tlvs);
+        }
+        of_list_append(list, entry);
+        of_object_delete(entry);
+    }
+    {
+        of_object_t *entry = of_bsn_gentable_entry_desc_stats_entry_new(OF_VERSION_1_3);
+        {
+            of_checksum_128_t checksum = { 0xFEDCBA9876543210L, 0xFFEECCBBAA998801L };
+            of_bsn_gentable_entry_desc_stats_entry_checksum_set(entry, checksum);
+        }
+        {
+            of_object_t *tlvs = of_list_bsn_tlv_new(OF_VERSION_1_3);
+            {
+                of_object_t *tlv = of_bsn_tlv_port_new(OF_VERSION_1_3);
+                of_bsn_tlv_port_value_set(tlv, 6);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            of_bsn_gentable_entry_desc_stats_entry_key_set(entry, tlvs);
+            of_object_delete(tlvs);
+        }
+        {
+            of_object_t *tlvs = of_list_bsn_tlv_new(OF_VERSION_1_3);
+            {
+                of_object_t *tlv = of_bsn_tlv_mac_new(OF_VERSION_1_3);
+                of_mac_addr_t mac = { { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0x01 } };
+                of_bsn_tlv_mac_value_set(tlv, mac);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            of_bsn_gentable_entry_desc_stats_entry_value_set(entry, tlvs);
+            of_object_delete(tlvs);
+        }
+        of_list_append(list, entry);
+        of_object_delete(entry);
+    }
+    of_bsn_gentable_entry_desc_stats_reply_entries_set(obj, list);
+    of_object_delete(list);
+}
diff --git a/test_data/of13/bsn_gentable_entry_desc_stats_request.data b/test_data/of13/bsn_gentable_entry_desc_stats_request.data
new file mode 100644
index 0000000..4971f06
--- /dev/null
+++ b/test_data/of13/bsn_gentable_entry_desc_stats_request.data
@@ -0,0 +1,19 @@
+-- binary
+04 12 # version, type
+00 3c # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 02 # subtype
+00 14 # table_id
+00 00 # pad
+fe dc ba 98 76 54 32 10 ff ee cc bb aa 99 00 00 # checksum
+ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 00 # checksum_mask
+-- python
+ofp.message.bsn_gentable_entry_desc_stats_request(
+    xid=0x12345678,
+    table_id=20,
+    checksum=     0xFEDCBA9876543210FFEECCBBAA990000,
+    checksum_mask=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF0000)
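+-- c
+/* Hedged sketch only: mirrors the bsn_gentable_clear_request C example above and
+   assumes the same generated accessor naming for this message. */
+obj = of_bsn_gentable_entry_desc_stats_request_new(OF_VERSION_1_3);
+of_bsn_gentable_entry_desc_stats_request_xid_set(obj, 0x12345678);
+of_bsn_gentable_entry_desc_stats_request_table_id_set(obj, 20);
+{
+    of_checksum_128_t checksum = { 0xFEDCBA9876543210L, 0xFFEECCBBAA990000L };
+    of_bsn_gentable_entry_desc_stats_request_checksum_set(obj, checksum);
+}
+{
+    of_checksum_128_t checksum_mask = { 0xFFFFFFFFFFFFFFFFL, 0xFFFFFFFFFFFF0000L };
+    of_bsn_gentable_entry_desc_stats_request_checksum_mask_set(obj, checksum_mask);
+}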
diff --git a/test_data/of13/bsn_gentable_entry_stats_reply.data b/test_data/of13/bsn_gentable_entry_stats_reply.data
new file mode 100644
index 0000000..255de88
--- /dev/null
+++ b/test_data/of13/bsn_gentable_entry_stats_reply.data
@@ -0,0 +1,154 @@
+-- binary
+04 13 # version, type
+00 60 # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 03 # subtype
+
+# entries[0]
+00 24 # length
+00 08 # key_length
+00 00 # key[0].type
+00 08 # key[0].length
+00 00 00 05 # key[0].value
+00 02 # stats[0].type
+00 0c # stats[0].length
+00 00 00 00 00 00 00 64 # stats[0].value
+00 03 # stats[1].type
+00 0c # stats[1].length
+00 00 00 00 00 00 00 65 # stats[1].value
+
+# entries[1]
+00 24 # length
+00 08 # key_length
+00 00 # key[0].type
+00 08 # key[0].length
+00 00 00 06 # key[0].value
+00 02 # stats[0].type
+00 0c # stats[0].length
+00 00 00 00 00 00 00 64 # stats[0].value
+00 03 # stats[1].type
+00 0c # stats[1].length
+00 00 00 00 00 00 00 65 # stats[1].value
+-- python
+ofp.message.bsn_gentable_entry_stats_reply(
+    xid=0x12345678,
+    entries=[
+        ofp.bsn_gentable_entry_stats_entry(
+            key=[
+                ofp.bsn_tlv.port(5),
+            ],
+            stats=[
+                ofp.bsn_tlv.rx_packets(100),
+                ofp.bsn_tlv.tx_packets(101),
+            ]),
+        ofp.bsn_gentable_entry_stats_entry(
+            key=[
+                ofp.bsn_tlv.port(6),
+            ],
+            stats=[
+                ofp.bsn_tlv.rx_packets(100),
+                ofp.bsn_tlv.tx_packets(101),
+            ]),
+    ])
+-- java
+builder.setXid(0x12345678)
+    .setEntries(
+        ImmutableList.<OFBsnGentableEntryStatsEntry>of(
+            factory.bsnGentableEntryStatsEntry(
+                ImmutableList.<OFBsnTlv>of(
+                    factory.bsnTlvs().port(OFPort.of(5))
+                ),
+                ImmutableList.<OFBsnTlv>of(
+                    factory.bsnTlvs().rxPackets(U64.of(100)),
+                    factory.bsnTlvs().txPackets(U64.of(101))
+                )
+            ),
+            factory.bsnGentableEntryStatsEntry(
+                ImmutableList.<OFBsnTlv>of(
+                    factory.bsnTlvs().port(OFPort.of(6))
+                ),
+                ImmutableList.<OFBsnTlv>of(
+                    factory.bsnTlvs().rxPackets(U64.of(100)),
+                    factory.bsnTlvs().txPackets(U64.of(101))
+                )
+            )
+        )
+    )
+-- c
+obj = of_bsn_gentable_entry_stats_reply_new(OF_VERSION_1_3);
+of_bsn_gentable_entry_stats_reply_xid_set(obj, 0x12345678);
+{
+    of_object_t *list = of_list_bsn_gentable_entry_stats_entry_new(OF_VERSION_1_3);
+    {
+        of_object_t *entry = of_bsn_gentable_entry_stats_entry_new(OF_VERSION_1_3);
+        {
+            of_object_t *tlvs = of_list_bsn_tlv_new(OF_VERSION_1_3);
+            {
+                of_object_t *tlv = of_bsn_tlv_port_new(OF_VERSION_1_3);
+                of_bsn_tlv_port_value_set(tlv, 5);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            of_bsn_gentable_entry_stats_entry_key_set(entry, tlvs);
+            of_object_delete(tlvs);
+        }
+        {
+            of_object_t *tlvs = of_list_bsn_tlv_new(OF_VERSION_1_3);
+            {
+                of_object_t *tlv = of_bsn_tlv_rx_packets_new(OF_VERSION_1_3);
+                of_bsn_tlv_rx_packets_value_set(tlv, 100);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            {
+                of_object_t *tlv = of_bsn_tlv_tx_packets_new(OF_VERSION_1_3);
+                of_bsn_tlv_tx_packets_value_set(tlv, 101);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            of_bsn_gentable_entry_stats_entry_stats_set(entry, tlvs);
+            of_object_delete(tlvs);
+        }
+        of_list_append(list, entry);
+        of_object_delete(entry);
+    }
+    {
+        of_object_t *entry = of_bsn_gentable_entry_stats_entry_new(OF_VERSION_1_3);
+        {
+            of_object_t *tlvs = of_list_bsn_tlv_new(OF_VERSION_1_3);
+            {
+                of_object_t *tlv = of_bsn_tlv_port_new(OF_VERSION_1_3);
+                of_bsn_tlv_port_value_set(tlv, 6);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            of_bsn_gentable_entry_stats_entry_key_set(entry, tlvs);
+            of_object_delete(tlvs);
+        }
+        {
+            of_object_t *tlvs = of_list_bsn_tlv_new(OF_VERSION_1_3);
+            {
+                of_object_t *tlv = of_bsn_tlv_rx_packets_new(OF_VERSION_1_3);
+                of_bsn_tlv_rx_packets_value_set(tlv, 100);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            {
+                of_object_t *tlv = of_bsn_tlv_tx_packets_new(OF_VERSION_1_3);
+                of_bsn_tlv_tx_packets_value_set(tlv, 101);
+                of_list_append(tlvs, tlv);
+                of_object_delete(tlv);
+            }
+            of_bsn_gentable_entry_stats_entry_stats_set(entry, tlvs);
+            of_object_delete(tlvs);
+        }
+        of_list_append(list, entry);
+        of_object_delete(entry);
+    }
+    of_bsn_gentable_entry_stats_reply_entries_set(obj, list);
+    of_object_delete(list);
+}
diff --git a/test_data/of13/bsn_gentable_entry_stats_request.data b/test_data/of13/bsn_gentable_entry_stats_request.data
new file mode 100644
index 0000000..a288a55
--- /dev/null
+++ b/test_data/of13/bsn_gentable_entry_stats_request.data
@@ -0,0 +1,19 @@
+-- binary
+04 12 # version, type
+00 3c # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 03 # subtype
+00 14 # table_id
+00 00 # pad
+fe dc ba 98 76 54 32 10 ff ee cc bb aa 99 00 00 # checksum
+ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 00 # checksum_mask
+-- python
+ofp.message.bsn_gentable_entry_stats_request(
+    xid=0x12345678,
+    table_id=20,
+    checksum=     0xFEDCBA9876543210FFEECCBBAA990000,
+    checksum_mask=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF0000)
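+-- c
+/* Hedged sketch only: mirrors the bsn_gentable_clear_request C example above and
+   assumes the same generated accessor naming for this message. */
+obj = of_bsn_gentable_entry_stats_request_new(OF_VERSION_1_3);
+of_bsn_gentable_entry_stats_request_xid_set(obj, 0x12345678);
+of_bsn_gentable_entry_stats_request_table_id_set(obj, 20);
+{
+    of_checksum_128_t checksum = { 0xFEDCBA9876543210L, 0xFFEECCBBAA990000L };
+    of_bsn_gentable_entry_stats_request_checksum_set(obj, checksum);
+}
+{
+    of_checksum_128_t checksum_mask = { 0xFFFFFFFFFFFFFFFFL, 0xFFFFFFFFFFFF0000L };
+    of_bsn_gentable_entry_stats_request_checksum_mask_set(obj, checksum_mask);
+}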
diff --git a/test_data/of13/bsn_gentable_set_buckets_size.data b/test_data/of13/bsn_gentable_set_buckets_size.data
new file mode 100644
index 0000000..7833736
--- /dev/null
+++ b/test_data/of13/bsn_gentable_set_buckets_size.data
@@ -0,0 +1,14 @@
+-- binary
+04 04 # version, type
+00 18 # length
+12 34 56 78 # xid
+00 5c 16 c7 # experimenter
+00 00 00 32 # subtype
+00 14 # table_id
+00 00 # pad
+00 11 22 33 # buckets_size
+-- python
+ofp.message.bsn_gentable_set_buckets_size(
+    xid=0x12345678,
+    table_id=20,
+    buckets_size=0x00112233)
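+-- c
+/* Hedged sketch only: assumes the generated of_bsn_gentable_set_buckets_size
+   accessors follow the same pattern as the other gentable messages. */
+obj = of_bsn_gentable_set_buckets_size_new(OF_VERSION_1_3);
+of_bsn_gentable_set_buckets_size_xid_set(obj, 0x12345678);
+of_bsn_gentable_set_buckets_size_table_id_set(obj, 20);
+of_bsn_gentable_set_buckets_size_buckets_size_set(obj, 0x00112233);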
diff --git a/test_data/of13/bsn_lacp_stats_reply.data b/test_data/of13/bsn_lacp_stats_reply.data
new file mode 100644
index 0000000..3e12237
--- /dev/null
+++ b/test_data/of13/bsn_lacp_stats_reply.data
@@ -0,0 +1,72 @@
+-- binary
+04 13 # version, type
+00 3c # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 01 # subtype
+00 00 f1 11 # entries[0].port_no
+f2 22 # entries[0].actor_sys_priority
+01 02 03 04 05 06 # entries[0].actor_sys_mac
+f3 33 # entries[0].actor_port_priority
+f4 44 # entries[0].actor_port_num
+f5 55 # entries[0].actor_key
+02 # entries[0].convergence_status
+00 # pad
+f6 66 # entries[0].partner_sys_priority
+0a 0b 0c 0d 0e 0f # entries[0].partner_sys_mac
+f7 77 # entries[0].partner_port_priority
+f8 88 # entries[0].partner_port_num
+f9 99 # entries[0].partner_key
+00 00 # pad
+-- python
+ofp.message.bsn_lacp_stats_reply(
+    xid=0x12345678,
+    flags=0,
+    entries=[
+        ofp.bsn_lacp_stats_entry(
+            port_no=0xf111,
+            actor_sys_priority=0xf222,
+            actor_sys_mac=[1, 2, 3, 4, 5, 6],
+            actor_port_priority=0xf333,
+            actor_port_num=0xf444,
+            actor_key=0xf555,
+            convergence_status=ofp.LACP_OUT_OF_SYNC,
+            partner_sys_priority=0xf666,
+            partner_sys_mac=[0xa, 0xb, 0xc, 0xd, 0xe, 0xf],
+            partner_port_priority=0xf777,
+            partner_port_num=0xf888,
+            partner_key=0xf999)])
+-- c
+obj = of_bsn_lacp_stats_reply_new(OF_VERSION_1_3);
+of_bsn_lacp_stats_reply_xid_set(obj, 0x12345678);
+{
+    of_object_t *entries = of_list_bsn_lacp_stats_entry_new(OF_VERSION_1_3);
+    {
+        of_object_t *elem = of_bsn_lacp_stats_entry_new(OF_VERSION_1_3);
+        of_bsn_lacp_stats_entry_port_no_set(elem, 0xf111);
+        of_bsn_lacp_stats_entry_actor_sys_priority_set(elem, 0xf222);
+        {
+            of_mac_addr_t mac = { { 1, 2, 3, 4, 5, 6 } };
+            of_bsn_lacp_stats_entry_actor_sys_mac_set(elem, mac);
+        }
+        of_bsn_lacp_stats_entry_actor_port_priority_set(elem, 0xf333);
+        of_bsn_lacp_stats_entry_actor_port_num_set(elem, 0xf444);
+        of_bsn_lacp_stats_entry_actor_key_set(elem, 0xf555);
+        of_bsn_lacp_stats_entry_partner_sys_priority_set(elem, 0xf666);
+        of_bsn_lacp_stats_entry_convergence_status_set(elem, LACP_OUT_OF_SYNC);
+        {
+            of_mac_addr_t mac = { { 0xa, 0xb, 0xc, 0xd, 0xe, 0xf } };
+            of_bsn_lacp_stats_entry_partner_sys_mac_set(elem, mac);
+        }
+        of_bsn_lacp_stats_entry_partner_port_priority_set(elem, 0xf777);
+        of_bsn_lacp_stats_entry_partner_port_num_set(elem, 0xf888);
+        of_bsn_lacp_stats_entry_partner_key_set(elem, 0xf999);
+        of_list_append(entries, elem);
+        of_object_delete(elem);
+    }
+    of_bsn_lacp_stats_reply_entries_set(obj, entries);
+    of_object_delete(entries);
+}
diff --git a/test_data/of13/bsn_lacp_stats_request.data b/test_data/of13/bsn_lacp_stats_request.data
new file mode 100644
index 0000000..34aaf94
--- /dev/null
+++ b/test_data/of13/bsn_lacp_stats_request.data
@@ -0,0 +1,18 @@
+-- binary
+04 12 # version, type
+00 18 # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 01 # subtype
+-- python
+ofp.message.bsn_lacp_stats_request(
+    xid=0x12345678,
+    flags=0)
+-- java
+builder.setXid(0x12345678)
+-- c
+obj = of_bsn_lacp_stats_request_new(OF_VERSION_1_3);
+of_bsn_lacp_stats_request_xid_set(obj, 0x12345678);
diff --git a/test_data/of13/bsn_port_counter_stats_reply.data b/test_data/of13/bsn_port_counter_stats_reply.data
new file mode 100644
index 0000000..15e98b4
--- /dev/null
+++ b/test_data/of13/bsn_port_counter_stats_reply.data
@@ -0,0 +1,42 @@
+-- binary
+04 13 # version, type
+00 50 # length
+12 34 56 78 # xid
+ff ff # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 5c 16 c7 # experimenter
+00 00 00 08 # subtype
+# entries[0]
+00 18 # length
+00 00 # pad
+00 00 00 03 # port
+12 34 56 78 9a bc de f0 # values[0]
+11 22 33 44 55 66 77 88 # values[1]
+# entries[1]
+00 20 # length
+00 00 # pad
+00 00 00 04 # port
+12 34 56 78 9a bc de f0 # values[0]
+11 22 33 44 55 66 77 88 # values[1]
+ff ff ff ff ff ff ff ff # values[2]
+-- python
+ofp.message.bsn_port_counter_stats_reply(
+    xid=0x12345678,
+    flags=0,
+    entries=[
+        ofp.bsn_port_counter_stats_entry(
+            port_no=3,
+            values=[
+                ofp.uint64(0x123456789abcdef0),
+                ofp.uint64(0x1122334455667788),
+            ]),
+        ofp.bsn_port_counter_stats_entry(
+            port_no=4,
+            values=[
+                ofp.uint64(0x123456789abcdef0),
+                ofp.uint64(0x1122334455667788),
+                ofp.uint64(0xffffffffffffffff),
+            ])
+    ]
+)
diff --git a/test_data/of13/bsn_set_aux_cxns_reply.data b/test_data/of13/bsn_set_aux_cxns_reply.data
new file mode 100644
index 0000000..d385d84
--- /dev/null
+++ b/test_data/of13/bsn_set_aux_cxns_reply.data
@@ -0,0 +1,20 @@
+-- binary
+04 04 # version, type
+00 18 # length
+12 34 56 78 # xid
+00 5c 16 c7 # experimenter
+00 00 00 3b # subtype
+00 00 00 01 # num_aux
+00 00 00 00 # status
+-- python
+ofp.message.bsn_set_aux_cxns_reply(
+    xid=0x12345678, num_aux=1, status=0)
+-- java
+builder.setXid(0x12345678)
+        .setNumAux(1)
+        .setStatus(0)
+-- c
+obj = of_bsn_set_aux_cxns_reply_new(OF_VERSION_1_3);
+of_bsn_set_aux_cxns_reply_xid_set(obj, 0x12345678);
+of_bsn_set_aux_cxns_reply_num_aux_set(obj, 1);
+of_bsn_set_aux_cxns_reply_status_set(obj, 0);
diff --git a/test_data/of13/bsn_set_aux_cxns_request.data b/test_data/of13/bsn_set_aux_cxns_request.data
new file mode 100644
index 0000000..ee48d98
--- /dev/null
+++ b/test_data/of13/bsn_set_aux_cxns_request.data
@@ -0,0 +1,17 @@
+-- binary
+04 04 # version, type
+00 14 # length
+12 34 56 78 # xid
+00 5c 16 c7 # experimenter
+00 00 00 3A # subtype
+00 00 00 01 # num_aux
+-- python
+ofp.message.bsn_set_aux_cxns_request(
+    xid=0x12345678, num_aux=1)
+-- java
+builder.setXid(0x12345678)
+       .setNumAux(1)
+-- c
+obj = of_bsn_set_aux_cxns_request_new(OF_VERSION_1_3);
+of_bsn_set_aux_cxns_request_xid_set(obj, 0x12345678);
+of_bsn_set_aux_cxns_request_num_aux_set(obj, 1);
diff --git a/test_data/of13/bsn_tlv_port.data b/test_data/of13/bsn_tlv_port.data
new file mode 100644
index 0000000..303b0ec
--- /dev/null
+++ b/test_data/of13/bsn_tlv_port.data
@@ -0,0 +1,6 @@
+-- binary
+00 00 # type
+00 08 # length
+00 00 00 05 # value
+-- python
+ofp.bsn_tlv.port(5)
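+-- c
+/* Hedged sketch only: the same constructor/setter pair is used inline in the
+   gentable key/value examples above. */
+obj = of_bsn_tlv_port_new(OF_VERSION_1_3);
+of_bsn_tlv_port_value_set(obj, 5);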
diff --git a/test_data/of13/echo_reply.data b/test_data/of13/echo_reply.data
new file mode 100644
index 0000000..4d9156d
--- /dev/null
+++ b/test_data/of13/echo_reply.data
@@ -0,0 +1,9 @@
+-- binary
+04 03 # version, type
+00 0b # length
+12 34 56 78 # xid
+61 62 63 # data
+-- python
+ofp.message.echo_reply(
+    xid=0x12345678,
+    data="abc")
diff --git a/test_data/of13/echo_request.data b/test_data/of13/echo_request.data
new file mode 100644
index 0000000..4cedbc7
--- /dev/null
+++ b/test_data/of13/echo_request.data
@@ -0,0 +1,9 @@
+-- binary
+04 02 # version, type
+00 0b # length
+12 34 56 78 # xid
+61 62 63 # data
+-- python
+ofp.message.echo_request(
+    xid=0x12345678,
+    data="abc")
diff --git a/test_data/of13/features_reply.data b/test_data/of13/features_reply.data
new file mode 100644
index 0000000..6daf046
--- /dev/null
+++ b/test_data/of13/features_reply.data
@@ -0,0 +1,20 @@
+-- binary
+04 06 # version, type
+00 20 # length
+12 34 56 78 # xid
+fe dc ba 98 76 54 32 10 # datapath_id
+00 00 00 40 # n_buffers
+c8 # n_tables
+05 # auxiliary_id
+00 00 # pad
+00 00 01 01 # capabilities
+00 00 00 00 # reserved
+-- python
+ofp.message.features_reply(
+    xid=0x12345678,
+    datapath_id=0xFEDCBA9876543210,
+    n_buffers=64,
+    n_tables=200,
+    auxiliary_id=5,
+    capabilities=ofp.OFPC_FLOW_STATS|ofp.OFPC_PORT_BLOCKED,
+    reserved=0)
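+-- c
+/* Hedged sketch only: assumes a generated setter exists for each features_reply
+   field; capabilities are given numerically (0x101 = OFPC_FLOW_STATS | OFPC_PORT_BLOCKED). */
+obj = of_features_reply_new(OF_VERSION_1_3);
+of_features_reply_xid_set(obj, 0x12345678);
+of_features_reply_datapath_id_set(obj, 0xFEDCBA9876543210ULL);
+of_features_reply_n_buffers_set(obj, 64);
+of_features_reply_n_tables_set(obj, 200);
+of_features_reply_auxiliary_id_set(obj, 5);
+of_features_reply_capabilities_set(obj, 0x101);
+of_features_reply_reserved_set(obj, 0);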
diff --git a/test_data/of13/features_request.data b/test_data/of13/features_request.data
new file mode 100644
index 0000000..2f85f16
--- /dev/null
+++ b/test_data/of13/features_request.data
@@ -0,0 +1,6 @@
+-- binary
+04 05 # version, type
+00 08 # length
+12 34 56 78 # xid
+-- python
+ofp.message.features_request(xid=0x12345678)
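+-- c
+/* Hedged sketch only: assumes the standard generated accessors for this message. */
+obj = of_features_request_new(OF_VERSION_1_3);
+of_features_request_xid_set(obj, 0x12345678);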
diff --git a/test_data/of13/flow_add.data b/test_data/of13/flow_add.data
new file mode 100644
index 0000000..d566096
--- /dev/null
+++ b/test_data/of13/flow_add.data
@@ -0,0 +1,102 @@
+-- binary
+04 0e # version, type
+00 80 # length
+12 34 56 78 # xid
+
+fe dc ba 98 76 54 32 10 # cookie
+
+ff 00 ff 00 ff 00 ff 00 # cookie_mask
+
+03 # table_id
+00 # _command
+00 05 # idle_timeout
+00 0a # hard_timeout
+17 70 # priority
+
+00 00 00 32 # buffer_id
+00 00 00 06 # out_port
+
+00 00 00 08 # out_group
+00 00 # flags
+00 00 # pad
+
+00 01 # match.type
+00 3F # match.length # 59 bytes OXMs + 4 bytes match header
+
+80 00 01 08 # match.oxm_list[0].type_len - IN_PORT
+00 00 00 04 # match.oxm_list[0].value
+00 00 00 05 # match.oxm_list[0].mask
+
+80 00 0A 02 # match.oxm_list[1].type_len - ETH_TYPE
+86 DD # match.oxm_list[1].value - ETH_TYPE = IPv6
+
+80 00 14 01 # match.oxm_list[2].type_len - IP Proto
+06 # match.oxm_list[2].value = IP_PROTO = TCP
+
+80 00 35 20 # match.oxm_list[3].type_len - IPV6_SRC
+1C CA FE 1C B1 10 1C 00 00 28 00 00 00 00 00 00 # match.oxm_list[3].value
+FF FF FF FF FF F0 FF FF 1C 2C 3C 00 00 00 00 00 # match.oxm_list[3].mask
+
+00 # match.pad
+
+00 01 # instructions[0].type
+00 08 # instructions[0].length
+04 # instructions[0].table_id
+00 00 00 # pad
+
+00 01 # instructions[1].type
+00 08 # instructions[1].length
+07 # instructions[1].table_id
+00 00 00 # pad
+-- python
+ofp.message.flow_add(
+    xid=0x12345678,
+    cookie=0xFEDCBA9876543210,
+    cookie_mask=0xFF00FF00FF00FF00,
+    table_id=3,
+    idle_timeout=5,
+    hard_timeout=10,
+    priority=6000,
+    buffer_id=50,
+    out_port=6,
+    out_group=8,
+    flags=0,
+    match=ofp.match(oxm_list=[
+        ofp.oxm.in_port_masked(value=4, value_mask=5),
+        ofp.oxm.eth_type(value=0x86dd),
+        ofp.oxm.ip_proto(value=6),
+        ofp.oxm.ipv6_src_masked(
+            value     ='\x1C\xCA\xFE\x1C\xB1\x10\x1C\x00\x00\x28\x00\x00\x00\x00\x00\x00',
+            value_mask='\xFF\xFF\xFF\xFF\xFF\xF0\xFF\xFF\x1C\x2C\x3C\x00\x00\x00\x00\x00')
+        ]),
+    instructions=[
+        ofp.instruction.goto_table(table_id=4),
+        ofp.instruction.goto_table(table_id=7)])
+-- java
+builder.setXid(0x12345678)
+    .setCookie(U64.parseHex("FEDCBA9876543210"))
+    .setCookieMask(U64.parseHex("FF00FF00FF00FF00"))
+    .setTableId(TableId.of(3))
+    .setIdleTimeout(5)
+    .setHardTimeout(10)
+    .setPriority(6000)
+    .setBufferId(OFBufferId.of(50))
+    .setOutPort(OFPort.of(6))
+    .setOutGroup(OFGroup.of(8))
+    .setFlags(ImmutableSet.<OFFlowModFlags>of())
+    .setMatch(
+        factory.buildMatch()
+            .setMasked(MatchField.IN_PORT, OFPort.of(4), OFPort.of(5))
+            .setExact(MatchField.ETH_TYPE, EthType.IPv6)
+            .setExact(MatchField.IP_PROTO, IpProtocol.TCP)
+            .setMasked(MatchField.IPV6_SRC, 
+                       IPv6Address.of(0x1CCAFE1CB1101C00l, 0x0028000000000000l),
+                       IPv6Address.of(0xFFFFFFFFFFF0FFFFl, 0x1C2C3C0000000000l))
+            .build()
+    )
+    .setInstructions(
+        ImmutableList.<OFInstruction>of(
+                factory.instructions().gotoTable(TableId.of(4)),
+                factory.instructions().gotoTable(TableId.of(7))
+        )
+    );
diff --git a/test_data/of13/flow_delete.data b/test_data/of13/flow_delete.data
new file mode 100644
index 0000000..bf9c453
--- /dev/null
+++ b/test_data/of13/flow_delete.data
@@ -0,0 +1,102 @@
+-- binary
+04 0e # version, type
+00 80 # length
+12 34 56 78 # xid
+
+fe dc ba 98 76 54 32 10 # cookie
+
+ff 00 ff 00 ff 00 ff 00 # cookie_mask
+
+03 # table_id
+03 # _command
+00 05 # idle_timeout
+00 0a # hard_timeout
+17 70 # priority
+
+00 00 00 32 # buffer_id
+00 00 00 06 # out_port
+
+00 00 00 08 # out_group
+00 00 # flags
+00 00 # pad
+
+00 01 # match.type
+00 3F # match.length # 59 bytes OXMs + 4 bytes match header
+
+80 00 01 08 # match.oxm_list[0].type_len - IN_PORT
+00 00 00 04 # match.oxm_list[0].value
+00 00 00 05 # match.oxm_list[0].mask
+
+80 00 0A 02 # match.oxm_list[1].type_len - ETH_TYPE
+86 DD # match.oxm_list[1].value - ETH_TYPE = IPv6
+
+80 00 14 01 # match.oxm_list[2].type_len - IP Proto
+06 # match.oxm_list[2].value = IP_PROTO = TCP
+
+80 00 35 20 # match.oxm_list[3].type_len - IPV6_SRC
+1C CA FE 1C B1 10 1C 00 00 28 00 00 00 00 00 00 # match.oxm_list[3].value
+FF FF FF FF FF F0 FF FF 1C 2C 3C 00 00 00 00 00 # match.oxm_list[3].mask
+
+00 # match.pad
+
+00 01 # instructions[0].type
+00 08 # instructions[0].length
+04 # instructions[0].table_id
+00 00 00 # pad
+
+00 01 # instructions[1].type
+00 08 # instructions[1].length
+07 # instructions[1].table_id
+00 00 00 # pad
+-- python
+ofp.message.flow_delete(
+    xid=0x12345678,
+    cookie=0xFEDCBA9876543210,
+    cookie_mask=0xFF00FF00FF00FF00,
+    table_id=3,
+    idle_timeout=5,
+    hard_timeout=10,
+    priority=6000,
+    buffer_id=50,
+    out_port=6,
+    out_group=8,
+    flags=0,
+    match=ofp.match(oxm_list=[
+        ofp.oxm.in_port_masked(value=4, value_mask=5),
+        ofp.oxm.eth_type(value=0x86dd),
+        ofp.oxm.ip_proto(value=6),
+        ofp.oxm.ipv6_src_masked(
+            value     ='\x1C\xCA\xFE\x1C\xB1\x10\x1C\x00\x00\x28\x00\x00\x00\x00\x00\x00',
+            value_mask='\xFF\xFF\xFF\xFF\xFF\xF0\xFF\xFF\x1C\x2C\x3C\x00\x00\x00\x00\x00')
+        ]),
+    instructions=[
+        ofp.instruction.goto_table(table_id=4),
+        ofp.instruction.goto_table(table_id=7)])
+-- java
+builder.setXid(0x12345678)
+    .setCookie(U64.parseHex("FEDCBA9876543210"))
+    .setCookieMask(U64.parseHex("FF00FF00FF00FF00"))
+    .setTableId(TableId.of(3))
+    .setIdleTimeout(5)
+    .setHardTimeout(10)
+    .setPriority(6000)
+    .setBufferId(OFBufferId.of(50))
+    .setOutPort(OFPort.of(6))
+    .setOutGroup(OFGroup.of(8))
+    .setFlags(ImmutableSet.<OFFlowModFlags>of())
+    .setMatch(
+        factory.buildMatch()
+            .setMasked(MatchField.IN_PORT, OFPort.of(4), OFPort.of(5))
+            .setExact(MatchField.ETH_TYPE, EthType.IPv6)
+            .setExact(MatchField.IP_PROTO, IpProtocol.TCP)
+            .setMasked(MatchField.IPV6_SRC, 
+                       IPv6Address.of(0x1CCAFE1CB1101C00l, 0x0028000000000000l),
+                       IPv6Address.of(0xFFFFFFFFFFF0FFFFl, 0x1C2C3C0000000000l))
+            .build()
+    )
+    .setInstructions(
+        ImmutableList.<OFInstruction>of(
+                factory.instructions().gotoTable(TableId.of(4)),
+                factory.instructions().gotoTable(TableId.of(7))
+        )
+    );
diff --git a/test_data/of13/flow_delete_strict.data b/test_data/of13/flow_delete_strict.data
new file mode 100644
index 0000000..c33e4f8
--- /dev/null
+++ b/test_data/of13/flow_delete_strict.data
@@ -0,0 +1,102 @@
+-- binary
+04 0e # version, type
+00 80 # length
+12 34 56 78 # xid
+
+fe dc ba 98 76 54 32 10 # cookie
+
+ff 00 ff 00 ff 00 ff 00 # cookie_mask
+
+03 # table_id
+04 # _command
+00 05 # idle_timeout
+00 0a # hard_timeout
+17 70 # priority
+
+00 00 00 32 # buffer_id
+00 00 00 06 # out_port
+
+00 00 00 08 # out_group
+00 00 # flags
+00 00 # pad
+
+00 01 # match.type
+00 3F # match.length # 59 bytes OXMs + 4 bytes match header
+
+80 00 01 08 # match.oxm_list[0].type_len - IN_PORT
+00 00 00 04 # match.oxm_list[0].value
+00 00 00 05 # match.oxm_list[0].mask
+
+80 00 0A 02 # match.oxm_list[1].type_len - ETH_TYPE
+86 DD # match.oxm_list[1].value - ETH_TYPE = IPv6
+
+80 00 14 01 # match.oxm_list[2].type_len - IP Proto
+06 # match.oxm_list[2].value = IP_PROTO = TCP
+
+80 00 35 20 # match.oxm_list[3].type_len - IPV6_SRC
+1C CA FE 1C B1 10 1C 00 00 28 00 00 00 00 00 00 # match.oxm_list[3].value
+FF FF FF FF FF F0 FF FF 1C 2C 3C 00 00 00 00 00 # match.oxm_list[3].mask
+
+00 # match.pad
+
+00 01 # instructions[0].type
+00 08 # instructions[0].length
+04 # instructions[0].table_id
+00 00 00 # pad
+
+00 01 # instructions[1].type
+00 08 # instructions[1].length
+07 # instructions[1].table_id
+00 00 00 # pad
+-- python
+ofp.message.flow_delete_strict(
+    xid=0x12345678,
+    cookie=0xFEDCBA9876543210,
+    cookie_mask=0xFF00FF00FF00FF00,
+    table_id=3,
+    idle_timeout=5,
+    hard_timeout=10,
+    priority=6000,
+    buffer_id=50,
+    out_port=6,
+    out_group=8,
+    flags=0,
+    match=ofp.match(oxm_list=[
+        ofp.oxm.in_port_masked(value=4, value_mask=5),
+        ofp.oxm.eth_type(value=0x86dd),
+        ofp.oxm.ip_proto(value=6),
+        ofp.oxm.ipv6_src_masked(
+            value     ='\x1C\xCA\xFE\x1C\xB1\x10\x1C\x00\x00\x28\x00\x00\x00\x00\x00\x00',
+            value_mask='\xFF\xFF\xFF\xFF\xFF\xF0\xFF\xFF\x1C\x2C\x3C\x00\x00\x00\x00\x00')
+        ]),
+    instructions=[
+        ofp.instruction.goto_table(table_id=4),
+        ofp.instruction.goto_table(table_id=7)])
+-- java
+builder.setXid(0x12345678)
+    .setCookie(U64.parseHex("FEDCBA9876543210"))
+    .setCookieMask(U64.parseHex("FF00FF00FF00FF00"))
+    .setTableId(TableId.of(3))
+    .setIdleTimeout(5)
+    .setHardTimeout(10)
+    .setPriority(6000)
+    .setBufferId(OFBufferId.of(50))
+    .setOutPort(OFPort.of(6))
+    .setOutGroup(OFGroup.of(8))
+    .setFlags(ImmutableSet.<OFFlowModFlags>of())
+    .setMatch(
+        factory.buildMatch()
+            .setMasked(MatchField.IN_PORT, OFPort.of(4), OFPort.of(5))
+            .setExact(MatchField.ETH_TYPE, EthType.IPv6)
+            .setExact(MatchField.IP_PROTO, IpProtocol.TCP)
+            .setMasked(MatchField.IPV6_SRC, 
+                       IPv6Address.of(0x1CCAFE1CB1101C00l, 0x0028000000000000l),
+                       IPv6Address.of(0xFFFFFFFFFFF0FFFFl, 0x1C2C3C0000000000l))
+            .build()
+    )
+    .setInstructions(
+        ImmutableList.<OFInstruction>of(
+                factory.instructions().gotoTable(TableId.of(4)),
+                factory.instructions().gotoTable(TableId.of(7))
+        )
+    );
diff --git a/test_data/of13/flow_modify.data b/test_data/of13/flow_modify.data
new file mode 100644
index 0000000..6f3ebee
--- /dev/null
+++ b/test_data/of13/flow_modify.data
@@ -0,0 +1,102 @@
+-- binary
+04 0e # version, type
+00 80 # length
+12 34 56 78 # xid
+
+fe dc ba 98 76 54 32 10 # cookie
+
+ff 00 ff 00 ff 00 ff 00 # cookie_mask
+
+03 # table_id
+01 # _command
+00 05 # idle_timeout
+00 0a # hard_timeout
+17 70 # priority
+
+00 00 00 32 # buffer_id
+00 00 00 06 # out_port
+
+00 00 00 08 # out_group
+00 00 # flags
+00 00 # pad
+
+00 01 # match.type
+00 3F # match.length # 59 bytes OXMs + 4 bytes match header
+
+80 00 01 08 # match.oxm_list[0].type_len - IN_PORT
+00 00 00 04 # match.oxm_list[0].value
+00 00 00 05 # match.oxm_list[0].mask
+
+80 00 0A 02 # match.oxm_list[1].type_len - ETH_TYPE
+86 DD # match.oxm_list[1].value - ETH_TYPE = IPv6
+
+80 00 14 01 # match.oxm_list[2].type_len - IP Proto
+06 # match.oxm_list[2].value = IP_PROTO = TCP
+
+80 00 35 20 # match.oxm_list[3].type_len - IPV6_SRC
+1C CA FE 1C B1 10 1C 00 00 28 00 00 00 00 00 00 # match.oxm_list[3].value
+FF FF FF FF FF F0 FF FF 1C 2C 3C 00 00 00 00 00 # match.oxm_list[3].mask
+
+00 # match.pad
+
+00 01 # instructions[0].type
+00 08 # instructions[0].length
+04 # instructions[0].table_id
+00 00 00 # pad
+
+00 01 # instructions[1].type
+00 08 # instructions[1].length
+07 # instructions[1].table_id
+00 00 00 # pad
+-- python
+ofp.message.flow_modify(
+    xid=0x12345678,
+    cookie=0xFEDCBA9876543210,
+    cookie_mask=0xFF00FF00FF00FF00,
+    table_id=3,
+    idle_timeout=5,
+    hard_timeout=10,
+    priority=6000,
+    buffer_id=50,
+    out_port=6,
+    out_group=8,
+    flags=0,
+    match=ofp.match(oxm_list=[
+        ofp.oxm.in_port_masked(value=4, value_mask=5),
+        ofp.oxm.eth_type(value=0x86dd),
+        ofp.oxm.ip_proto(value=6),
+        ofp.oxm.ipv6_src_masked(
+            value     ='\x1C\xCA\xFE\x1C\xB1\x10\x1C\x00\x00\x28\x00\x00\x00\x00\x00\x00',
+            value_mask='\xFF\xFF\xFF\xFF\xFF\xF0\xFF\xFF\x1C\x2C\x3C\x00\x00\x00\x00\x00')
+        ]),
+    instructions=[
+        ofp.instruction.goto_table(table_id=4),
+        ofp.instruction.goto_table(table_id=7)])
+-- java
+builder.setXid(0x12345678)
+    .setCookie(U64.parseHex("FEDCBA9876543210"))
+    .setCookieMask(U64.parseHex("FF00FF00FF00FF00"))
+    .setTableId(TableId.of(3))
+    .setIdleTimeout(5)
+    .setHardTimeout(10)
+    .setPriority(6000)
+    .setBufferId(OFBufferId.of(50))
+    .setOutPort(OFPort.of(6))
+    .setOutGroup(OFGroup.of(8))
+    .setFlags(ImmutableSet.<OFFlowModFlags>of())
+    .setMatch(
+        factory.buildMatch()
+            .setMasked(MatchField.IN_PORT, OFPort.of(4), OFPort.of(5))
+            .setExact(MatchField.ETH_TYPE, EthType.IPv6)
+            .setExact(MatchField.IP_PROTO, IpProtocol.TCP)
+            .setMasked(MatchField.IPV6_SRC, 
+                       IPv6Address.of(0x1CCAFE1CB1101C00l, 0x0028000000000000l),
+                       IPv6Address.of(0xFFFFFFFFFFF0FFFFl, 0x1C2C3C0000000000l))
+            .build()
+    )
+    .setInstructions(
+        ImmutableList.<OFInstruction>of(
+                factory.instructions().gotoTable(TableId.of(4)),
+                factory.instructions().gotoTable(TableId.of(7))
+        )
+    );
diff --git a/test_data/of13/flow_modify_strict.data b/test_data/of13/flow_modify_strict.data
new file mode 100644
index 0000000..26e55a4
--- /dev/null
+++ b/test_data/of13/flow_modify_strict.data
@@ -0,0 +1,102 @@
+-- binary
+04 0e # version, type
+00 80 # length
+12 34 56 78 # xid
+
+fe dc ba 98 76 54 32 10 # cookie
+
+ff 00 ff 00 ff 00 ff 00 # cookie_mask
+
+03 # table_id
+02 # _command
+00 05 # idle_timeout
+00 0a # hard_timeout
+17 70 # priority
+
+00 00 00 32 # buffer_id
+00 00 00 06 # out_port
+
+00 00 00 08 # out_group
+00 00 # flags
+00 00 # pad
+
+00 01 # match.type
+00 3F # match.length # 59 bytes OXMs + 4 bytes match header
+
+80 00 01 08 # match.oxm_list[0].type_len - IN_PORT
+00 00 00 04 # match.oxm_list[0].value
+00 00 00 05 # match.oxm_list[0].mask
+
+80 00 0A 02 # match.oxm_list[1].type_len - ETH_TYPE
+86 DD # match.oxm_list[1].value - ETH_TYPE = IPv6
+
+80 00 14 01 # match.oxm_list[2].type_len - IP Proto
+06 # match.oxm_list[2].value = IP_PROTO = TCP
+
+80 00 35 20 # match.oxm_list[3].type_len - IPV6_SRC
+1C CA FE 1C B1 10 1C 00 00 28 00 00 00 00 00 00 # match.oxm_list[3].value
+FF FF FF FF FF F0 FF FF 1C 2C 3C 00 00 00 00 00 # match.oxm_list[3].mask
+
+00 # match.pad
+
+00 01 # instructions[0].type
+00 08 # instructions[0].length
+04 # instructions[0].table_id
+00 00 00 # pad
+
+00 01 # instructions[1].type
+00 08 # instructions[1].length
+07 # instructions[1].table_id
+00 00 00 # pad
+-- python
+ofp.message.flow_modify_strict(
+    xid=0x12345678,
+    cookie=0xFEDCBA9876543210,
+    cookie_mask=0xFF00FF00FF00FF00,
+    table_id=3,
+    idle_timeout=5,
+    hard_timeout=10,
+    priority=6000,
+    buffer_id=50,
+    out_port=6,
+    out_group=8,
+    flags=0,
+    match=ofp.match(oxm_list=[
+        ofp.oxm.in_port_masked(value=4, value_mask=5),
+        ofp.oxm.eth_type(value=0x86dd),
+        ofp.oxm.ip_proto(value=6),
+        ofp.oxm.ipv6_src_masked(
+            value     ='\x1C\xCA\xFE\x1C\xB1\x10\x1C\x00\x00\x28\x00\x00\x00\x00\x00\x00',
+            value_mask='\xFF\xFF\xFF\xFF\xFF\xF0\xFF\xFF\x1C\x2C\x3C\x00\x00\x00\x00\x00')
+        ]),
+    instructions=[
+        ofp.instruction.goto_table(table_id=4),
+        ofp.instruction.goto_table(table_id=7)])
+-- java
+builder.setXid(0x12345678)
+    .setCookie(U64.parseHex("FEDCBA9876543210"))
+    .setCookieMask(U64.parseHex("FF00FF00FF00FF00"))
+    .setTableId(TableId.of(3))
+    .setIdleTimeout(5)
+    .setHardTimeout(10)
+    .setPriority(6000)
+    .setBufferId(OFBufferId.of(50))
+    .setOutPort(OFPort.of(6))
+    .setOutGroup(OFGroup.of(8))
+    .setFlags(ImmutableSet.<OFFlowModFlags>of())
+    .setMatch(
+        factory.buildMatch()
+            .setMasked(MatchField.IN_PORT, OFPort.of(4), OFPort.of(5))
+            .setExact(MatchField.ETH_TYPE, EthType.IPv6)
+            .setExact(MatchField.IP_PROTO, IpProtocol.TCP)
+            .setMasked(MatchField.IPV6_SRC, 
+                       IPv6Address.of(0x1CCAFE1CB1101C00l, 0x0028000000000000l),
+                       IPv6Address.of(0xFFFFFFFFFFF0FFFFl, 0x1C2C3C0000000000l))
+            .build()
+    )
+    .setInstructions(
+        ImmutableList.<OFInstruction>of(
+                factory.instructions().gotoTable(TableId.of(4)),
+                factory.instructions().gotoTable(TableId.of(7))
+        )
+    );
diff --git a/test_data/of13/flow_removed.data b/test_data/of13/flow_removed.data
new file mode 100644
index 0000000..b6d73be
--- /dev/null
+++ b/test_data/of13/flow_removed.data
@@ -0,0 +1,39 @@
+-- binary
+04 0b # version, type
+00 48 # length
+12 34 56 78 # xid
+fe dc ba 98 76 54 32 10 # cookie
+42 68 # priority
+02 # reason
+14 # table_id
+00 00 00 0a # duration_sec
+00 00 03 e8 # duration_nsec
+00 05 # idle_timeout
+00 1e # hard_timeout
+00 00 00 00 00 00 00 01 # packet_count
+00 00 00 00 00 00 00 02 # byte_count
+00 01 # match.type
+00 16 # match.length
+80 00 01 08 # match.oxm_list[0].type_len - IN_PORT
+00 00 00 04 # match.oxm_list[0].value
+00 00 00 05 # match.oxm_list[0].mask
+80 00 2A 02 # match.oxm_list[1].type_len - ARP_OP
+00 01 # match.oxm_list[1].value
+00 00 # match.pad
+-- python
+ofp.message.flow_removed(
+    xid=0x12345678,
+    cookie=0xFEDCBA9876543210,
+    priority=17000,
+    reason=ofp.OFPRR_DELETE,
+    table_id=20,
+    duration_sec=10,
+    duration_nsec=1000,
+    idle_timeout=5,
+    hard_timeout=30,
+    packet_count=1,
+    byte_count=2,
+    match=ofp.match(oxm_list=[
+        ofp.oxm.in_port_masked(value=4, value_mask=5),
+        ofp.oxm.arp_op(value=1)
+    ]))
diff --git a/test_data/of13/get_config_reply.data b/test_data/of13/get_config_reply.data
new file mode 100644
index 0000000..66ea834
--- /dev/null
+++ b/test_data/of13/get_config_reply.data
@@ -0,0 +1,16 @@
+-- binary
+04 08 # version, type
+00 0c # length
+12 34 56 78 # xid
+00 02 # flags
+ff ff # miss_send_len
+-- python
+ofp.message.get_config_reply(
+    xid=0x12345678,
+    flags=ofp.OFPC_FRAG_REASM,
+    miss_send_len=0xffff)
+-- java
+builder.setXid(0x12345678)
+    .setFlags(Sets.immutableEnumSet(OFConfigFlags.FRAG_REASM))
+    .setMissSendLen(0xffff)
+    .build()
diff --git a/test_data/of13/get_config_request.data b/test_data/of13/get_config_request.data
new file mode 100644
index 0000000..8361a6b
--- /dev/null
+++ b/test_data/of13/get_config_request.data
@@ -0,0 +1,6 @@
+-- binary
+04 07 # version, type
+00 08 # length
+12 34 56 78 # xid
+-- python
+ofp.message.get_config_request(xid=0x12345678)
diff --git a/test_data/of13/group_desc_stats_reply.data b/test_data/of13/group_desc_stats_reply.data
new file mode 100644
index 0000000..5e95a67
--- /dev/null
+++ b/test_data/of13/group_desc_stats_reply.data
@@ -0,0 +1,69 @@
+-- binary
+04 13 # version, type
+00 80 # length
+12 34 56 78 # xid
+00 07 # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 68 # entries[0].length
+03 # entries[0].group_type
+00 # entries[0].pad
+00 00 00 01 # entries[0].group_id
+00 30 # entries[0].buckets[0].len
+00 01 # entries[0].buckets[0].weight
+00 00 00 05 # entries[0].buckets[0].watch_port
+ff ff ff ff # entries[0].buckets[0].watch_group
+00 00 00 00 # entries[0].buckets[0].pad
+00 00 # entries[0].buckets[0].actions[0].type
+00 10 # entries[0].buckets[0].actions[0].len
+00 00 00 05 # entries[0].buckets[0].actions[0].port
+00 00 # entries[0].buckets[0].actions[0].max_len
+00 00 00 00 00 00 # entries[0].buckets[0].actions[0].pad
+00 00 # entries[0].buckets[0].actions[1].type
+00 10 # entries[0].buckets[0].actions[1].len
+00 00 00 06 # entries[0].buckets[0].actions[1].port
+00 00 # entries[0].buckets[0].actions[1].max_len
+00 00 00 00 00 00 # entries[0].buckets[0].actions[1].pad
+00 30 # entries[0].buckets[1].len
+00 01 # entries[0].buckets[1].weight
+00 00 00 06 # entries[0].buckets[1].watch_port
+ff ff ff ff # entries[0].buckets[1].watch_group
+00 00 00 00 # entries[0].buckets[1].pad
+00 00 # entries[0].buckets[1].actions[0].type
+00 10 # entries[0].buckets[1].actions[0].len
+00 00 00 05 # entries[0].buckets[1].actions[0].port
+00 00 # entries[0].buckets[1].actions[0].max_len
+00 00 00 00 00 00 # entries[0].buckets[1].actions[0].pad
+00 00 # entries[0].buckets[1].actions[1].type
+00 10 # entries[0].buckets[1].actions[1].len
+00 00 00 06 # entries[0].buckets[1].actions[1].port
+00 00 # entries[0].buckets[1].actions[1].max_len
+00 00 00 00 00 00 # entries[0].buckets[1].actions[1].pad
+00 08 # entries[1].length
+03 # entries[1].group_type
+00 # entries[1].pad
+00 00 00 02 # entries[1].group_id
+-- python
+ofp.message.group_desc_stats_reply(
+    xid=0x12345678,
+    flags=0,
+    entries=[
+        ofp.group_desc_stats_entry(
+            group_type=ofp.OFPGT_FF,
+            group_id=1,
+            buckets=[
+                ofp.bucket(
+                    weight=1,
+                    watch_port=5,
+                    watch_group=0xffffffff,
+                    actions=[
+                        ofp.action.output(port=5, max_len=0),
+                        ofp.action.output(port=6, max_len=0)]),
+                ofp.bucket(
+                    weight=1,
+                    watch_port=6,
+                    watch_group=0xffffffff,
+                    actions=[
+                        ofp.action.output(port=5, max_len=0),
+                        ofp.action.output(port=6, max_len=0)])]),
+        ofp.group_desc_stats_entry(group_type=ofp.OFPGT_FF, group_id=2, buckets=[])])
diff --git a/test_data/of13/group_modify.data b/test_data/of13/group_modify.data
new file mode 100644
index 0000000..87eee29
--- /dev/null
+++ b/test_data/of13/group_modify.data
@@ -0,0 +1,86 @@
+-- binary
+04 0f # version, type
+00 70 # length
+12 34 56 78 # xid
+00 01 # command
+03 # group_type
+00 # pad
+00 00 00 05 # group_id
+00 30 # buckets[0].len
+00 01 # buckets[0].weight
+00 00 00 05 # buckets[0].watch_port
+ff ff ff ff # buckets[0].watch_group
+00 00 00 00 # pad
+00 00 # buckets[0].actions[0].type
+00 10 # buckets[0].actions[0].len
+00 00 00 05 # buckets[0].actions[0].port
+00 00 # buckets[0].actions[0].max_len
+00 00 00 00 00 00 # pad
+00 00 # buckets[0].actions[1].type
+00 10 # buckets[0].actions[1].len
+00 00 00 06 # buckets[0].actions[1].port
+00 00 # buckets[0].actions[1].max_len
+00 00 00 00 00 00 # pad
+00 30 # buckets[1].len
+00 01 # buckets[1].weight
+00 00 00 06 # buckets[1].watch_port
+ff ff ff ff # buckets[1].watch_group
+00 00 00 00 # pad
+00 00 # buckets[1].actions[0].type
+00 10 # buckets[1].actions[0].len
+00 00 00 05 # buckets[1].actions[0].port
+00 00 # buckets[1].actions[0].max_len
+00 00 00 00 00 00 # pad
+00 00 # buckets[1].actions[1].type
+00 10 # buckets[1].actions[1].len
+00 00 00 06 # buckets[1].actions[1].port
+00 00 # buckets[1].actions[1].max_len
+00 00 00 00 00 00 # pad
+-- python
+ofp.message.group_modify(
+    xid=0x12345678,
+    group_type=ofp.OFPGT_FF,
+    group_id=5,
+    buckets=[
+        ofp.bucket(
+            weight=1,
+            watch_port=5,
+            watch_group=0xffffffff,
+            actions=[
+                ofp.action.output(port=5, max_len=0),
+                ofp.action.output(port=6, max_len=0)]),
+        ofp.bucket(
+            weight=1,
+            watch_port=6,
+            watch_group=0xffffffff,
+            actions=[
+                ofp.action.output(port=5, max_len=0),
+                ofp.action.output(port=6, max_len=0)])])
+-- java
+    OFActions actions = factory.actions();
+    builder
+      .setXid(0x12345678)
+      .setGroupType(OFGroupType.FF)
+      .setGroup(OFGroup.of(5))
+      .setBuckets(ImmutableList.<OFBucket>of(
+        factory.buildBucket()
+          .setWeight(1)
+          .setWatchPort(OFPort.of(5))
+          .setWatchGroup(OFGroup.ANY)
+          .setActions(ImmutableList.<OFAction>of(
+            actions.output(OFPort.of(5), 0),
+            actions.output(OFPort.of(6), 0)
+          ))
+          .build(),
+        factory.buildBucket()
+          .setWeight(1)
+          .setWatchPort(OFPort.of(6))
+          .setWatchGroup(OFGroup.ANY)
+          .setActions(ImmutableList.<OFAction>of(
+            actions.output(OFPort.of(5), 0),
+            actions.output(OFPort.of(6), 0)
+          ))
+          .build()
+         )
+      )
+      .build();
diff --git a/test_data/of13/group_stats_reply.data b/test_data/of13/group_stats_reply.data
new file mode 100644
index 0000000..b14eb74
--- /dev/null
+++ b/test_data/of13/group_stats_reply.data
@@ -0,0 +1,52 @@
+-- binary
+04 13 # version, type
+00 80 # length
+12 34 56 78 # xid
+00 06 # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 48 # entries[0].length
+00 00 # pad
+00 00 00 01 # entries[0].group_id
+00 00 00 08 # entries[0].ref_count
+00 00 00 00 # pad
+00 00 00 00 00 00 00 10 # entries[0].packet_count
+00 00 00 00 00 00 00 20 # entries[0].byte_count
+00 00 00 14 # entries[0].duration_sec
+00 00 00 64 # entries[0].duration_nsec
+00 00 00 00 00 00 00 01 # entries[0].bucket_stats[0].packet_count
+00 00 00 00 00 00 00 02 # entries[0].bucket_stats[0].byte_count
+00 00 00 00 00 00 00 03 # entries[0].bucket_stats[1].packet_count
+00 00 00 00 00 00 00 04 # entries[0].bucket_stats[1].byte_count
+00 28 # entries[1].length
+00 00 # pad
+00 00 00 01 # entries[1].group_id
+00 00 00 08 # entries[1].ref_count
+00 00 00 00 # pad
+00 00 00 00 00 00 00 10 # entries[1].packet_count
+00 00 00 00 00 00 00 20 # entries[1].byte_count
+00 00 00 14 # entries[1].duration_sec
+00 00 00 64 # entries[1].duration_nsec
+-- python
+ofp.message.group_stats_reply(
+    xid=0x12345678,
+    flags=0,
+    entries=[
+        ofp.group_stats_entry(
+            group_id=1,
+            ref_count=8,
+            packet_count=16,
+            byte_count=32,
+            duration_sec=20,
+            duration_nsec=100,
+            bucket_stats=[
+                ofp.bucket_counter(packet_count=1, byte_count=2),
+                ofp.bucket_counter(packet_count=3, byte_count=4)]),
+        ofp.group_stats_entry(
+            group_id=1,
+            ref_count=8,
+            packet_count=16,
+            byte_count=32,
+            duration_sec=20,
+            duration_nsec=100,
+            bucket_stats=[])])
diff --git a/test_data/of13/hello.data b/test_data/of13/hello.data
new file mode 100644
index 0000000..94be823
--- /dev/null
+++ b/test_data/of13/hello.data
@@ -0,0 +1,20 @@
+-- binary
+04 00 # version, type
+00 20 # length
+12 34 56 78 # xid
+00 01 # elements[0].type
+00 0c # elements[0].length
+00 00 00 01 # elements[0].bitmaps[0]
+00 00 00 02 # elements[0].bitmaps[1]
+00 01 # elements[1].type
+00 0c # elements[1].length
+00 00 00 03 # elements[1].bitmaps[0]
+00 00 00 04 # elements[1].bitmaps[1]
+-- python
+ofp.message.hello(
+    xid=0x12345678,
+    elements=[
+        ofp.hello_elem_versionbitmap(
+            bitmaps=[ofp.uint32(1), ofp.uint32(2)]),
+        ofp.hello_elem_versionbitmap(
+            bitmaps=[ofp.uint32(3), ofp.uint32(4)])])
diff --git a/test_data/of13/hello_elem_versionbitmap.data b/test_data/of13/hello_elem_versionbitmap.data
new file mode 100644
index 0000000..230f7db
--- /dev/null
+++ b/test_data/of13/hello_elem_versionbitmap.data
@@ -0,0 +1,10 @@
+-- binary
+00 01 # type
+00 0c # length
+01 23 45 67 # bitmaps[0]
+89 ab cd ef # bitmaps[1]
+-- python
+ofp.hello_elem_versionbitmap(
+    bitmaps=[
+        ofp.uint32(0x01234567),
+        ofp.uint32(0x89abcdef)])
diff --git a/test_data/of13/instruction_bsn_disable_src_mac_check.data b/test_data/of13/instruction_bsn_disable_src_mac_check.data
new file mode 100644
index 0000000..6001fde
--- /dev/null
+++ b/test_data/of13/instruction_bsn_disable_src_mac_check.data
@@ -0,0 +1,11 @@
+-- binary
+ff ff # type
+00 10 # length
+00 5c 16 c7 # experimenter
+00 00 00 00 # subtype
+00 00 00 00 # pad
+-- python
+ofp.instruction.bsn_disable_src_mac_check()
+-- java
+-- c
+obj = of_instruction_bsn_disable_src_mac_check_new(OF_VERSION_1_3);
diff --git a/test_data/of13/instruction_goto_table.data b/test_data/of13/instruction_goto_table.data
new file mode 100644
index 0000000..6d0e32d
--- /dev/null
+++ b/test_data/of13/instruction_goto_table.data
@@ -0,0 +1,7 @@
+-- binary
+00 01 # type
+00 08 # length
+05 # table_id
+00 00 00 # pad
+-- python
+ofp.instruction.goto_table(table_id=5)
diff --git a/test_data/of13/match_v3.data b/test_data/of13/match_v3.data
new file mode 100644
index 0000000..8d42019
--- /dev/null
+++ b/test_data/of13/match_v3.data
@@ -0,0 +1,33 @@
+-- binary
+00 01 # type
+00 3C # length
+80 00 # oxm_list[0].class
+05 10 # oxm_list[0].type_len - METADATA
+FE DC BA 98 12 14 12 10 # oxm_list[0].value
+FF FF FF FF 12 34 56 78 # oxm_list[0].mask
+80 00 # oxm_list[1].class
+08 06 # oxm_list[1].type_len - ETH_SRC
+01 02 03 04 05 06 # oxm_list[1].value
+80 00 # oxm_list[2].class
+20 02 # oxm_list[2].type_len - UDP_DST
+00 35 # oxm_list[2].value
+80 00 # oxm_list[3].class
+36 10 # oxm_list[3].type_len - IPV6_DST
+12 12 12 12 12 12 12 12 # oxm_list[3].value
+12 12 12 12 12 12 12 12 # ...
+00 00 00 00 # pad
+-- python
+ofp.match([
+    ofp.oxm.metadata_masked(0xFEDCBA9812141210, 0xFFFFFFFF12345678),
+    ofp.oxm.eth_src([1,2,3,4,5,6]),
+    ofp.oxm.udp_dst(53),
+    ofp.oxm.ipv6_dst("\x12" * 16),
+])
+-- java
+builder
+       .setMasked(MatchField.METADATA, OFMetadata.ofRaw(0xFEDCBA9812141210l), OFMetadata.ofRaw(0xFFFFFFFF12345678l))
+       .setExact(MatchField.ETH_SRC, MacAddress.of(new byte[] {1,2,3,4,5,6}))
+       .setExact(MatchField.UDP_DST, TransportPort.of(53))
+       .setExact(MatchField.IPV6_DST, IPv6Address.of(new byte[] { 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 
+                                                                  0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12 }))
+                                                           
\ No newline at end of file
diff --git a/test_data/of13/meter_config_stats_reply.data b/test_data/of13/meter_config_stats_reply.data
new file mode 100644
index 0000000..940e4f5
--- /dev/null
+++ b/test_data/of13/meter_config_stats_reply.data
@@ -0,0 +1,25 @@
+-- binary
+04 13 # version, type
+00 30 # length
+12 34 56 78 # xid
+00 0a # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 01 # entries[0].type
+00 10 # entries[0].length
+00 00 00 01 # entries[0].rate
+00 00 00 02 # entries[0].burst_size
+00 00 00 00 # pad
+00 02 # entries[1].type
+00 10 # entries[1].length
+00 00 00 03 # entries[1].rate
+00 00 00 04 # entries[1].burst_size
+05 # entries[1].prec_level
+00 00 00 # pad
+-- python
+ofp.message.meter_config_stats_reply(
+    xid=0x12345678,
+    flags=0,
+    entries=[
+        ofp.meter_band.drop(rate=1, burst_size=2),
+        ofp.meter_band.dscp_remark(rate=3, burst_size=4, prec_level=5)])
diff --git a/test_data/of13/meter_feature_stats_reply.data b/test_data/of13/meter_feature_stats_reply.data
new file mode 100644
index 0000000..4a1b9bf
--- /dev/null
+++ b/test_data/of13/meter_feature_stats_reply.data
@@ -0,0 +1,23 @@
+-- binary
+04 13 # version, type
+00 20 # length
+12 34 56 78 # xid
+00 0b # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 00 00 05 # max_meter
+00 00 00 03 # band_types
+00 00 00 09 # capabilities
+0a # max_bands
+07 # max_color
+00 00 # pad
+-- python
+ofp.message.meter_features_stats_reply(
+    xid=0x12345678,
+    flags=0,
+    features=ofp.meter_features(
+        max_meter=5,
+        band_types=ofp.OFPMBT_DROP|ofp.OFPMBT_DSCP_REMARK,
+        capabilities=ofp.OFPMF_KBPS|ofp.OFPMF_STATS,
+        max_bands=10,
+        max_color=7))
diff --git a/test_data/of13/meter_stats_reply.data b/test_data/of13/meter_stats_reply.data
new file mode 100644
index 0000000..04f1fe8
--- /dev/null
+++ b/test_data/of13/meter_stats_reply.data
@@ -0,0 +1,50 @@
+-- binary
+04 13 # version, type
+00 80 # length
+12 34 56 78 # xid
+00 09 # stats_type
+00 00 # flags
+00 00 00 00 # pad
+00 00 00 01 # entries[0].meter_id
+00 48 # entries[0].len
+00 00 00 00 00 00 # pad
+00 00 00 08 # entries[0].flow_count
+00 00 00 00 00 00 00 10 # entries[0].packet_in_count
+00 00 00 00 00 00 00 20 # entries[0].byte_in_count
+00 00 00 14 # entries[0].duration_sec
+00 00 00 64 # entries[0].duration_nsec
+00 00 00 00 00 00 00 01 # entries[0].band_stats[0].packet_band_count
+00 00 00 00 00 00 00 02 # entries[0].band_stats[0].byte_band_count
+00 00 00 00 00 00 00 03 # entries[0].band_stats[1].packet_band_count
+00 00 00 00 00 00 00 04 # entries[0].band_stats[1].byte_band_count
+00 00 00 02 # entries[1].meter_id
+00 28 # entries[1].len
+00 00 00 00 00 00 # pad
+00 00 00 08 # entries[1].flow_count
+00 00 00 00 00 00 00 10 # entries[1].packet_in_count
+00 00 00 00 00 00 00 20 # entries[1].byte_in_count
+00 00 00 14 # entries[1].duration_sec
+00 00 00 64 # entries[1].duration_nsec
+-- python
+ofp.message.meter_stats_reply(
+    xid=0x12345678,
+    flags=0,
+    entries=[
+        ofp.meter_stats(
+            meter_id=1,
+            flow_count=8,
+            packet_in_count=16,
+            byte_in_count=32,
+            duration_sec=20,
+            duration_nsec=100,
+            band_stats=[
+                ofp.meter_band_stats(packet_band_count=1, byte_band_count=2),
+                ofp.meter_band_stats(packet_band_count=3, byte_band_count=4)]),
+        ofp.meter_stats(
+            meter_id=2,
+            flow_count=8,
+            packet_in_count=16,
+            byte_in_count=32,
+            duration_sec=20,
+            duration_nsec=100,
+            band_stats=[])])
diff --git a/test_data/of13/oxm_bsn_global_vrf_allowed.data b/test_data/of13/oxm_bsn_global_vrf_allowed.data
new file mode 100644
index 0000000..d5fe8d7
--- /dev/null
+++ b/test_data/of13/oxm_bsn_global_vrf_allowed.data
@@ -0,0 +1,12 @@
+-- binary
+00 03 # class
+06 # type/masked
+01 # length
+01 # value
+-- python
+ofp.oxm.bsn_global_vrf_allowed(1)
+-- c
+obj = of_oxm_bsn_global_vrf_allowed_new(OF_VERSION_1_3);
+of_oxm_bsn_global_vrf_allowed_value_set(obj, 1);
+-- java
+builder.setValue(OFBooleanValue.TRUE)
diff --git a/test_data/of13/oxm_bsn_in_ports_masked_128.data b/test_data/of13/oxm_bsn_in_ports_masked_128.data
new file mode 100644
index 0000000..9ef5245
--- /dev/null
+++ b/test_data/of13/oxm_bsn_in_ports_masked_128.data
@@ -0,0 +1,24 @@
+-- binary
+00 03 # class
+01 # type/masked
+20 # length
+00 00 00 00 00 00 00 00 # value
+00 00 00 00 00 00 00 00 # ...
+ff ff ff fe ff ff ff ff # mask - Only ports 0, 17, 96 are selected (and thus are zero)
+ff ff ff ff ff fd ff fe # ...
+-- python
+ofp.oxm.bsn_in_ports_128_masked(set(), set(range(0,128)) - set((0, 17,96)))
+-- c
+obj = of_oxm_bsn_in_ports_128_masked_new(OF_VERSION_1_3);
+{
+    of_bitmap_128_t bmap = { 0, 0 };
+    of_oxm_bsn_in_ports_128_masked_value_set(obj, bmap);
+}
+{
+    of_bitmap_128_t bmap = { 0xfffffffeffffffff , 0xfffffffffffdfffe };
+    of_oxm_bsn_in_ports_128_masked_value_mask_set(obj, bmap);
+}
+-- java
+OFPortMap portmap = OFPortMap.ofPorts(OFPort.of(0), OFPort.of(17), OFPort.of(96));
+builder.setValue(portmap.getValue());
+builder.setMask(portmap.getMask());
diff --git a/test_data/of13/oxm_bsn_l3_src_class_id.data b/test_data/of13/oxm_bsn_l3_src_class_id.data
new file mode 100644
index 0000000..5da07e3
--- /dev/null
+++ b/test_data/of13/oxm_bsn_l3_src_class_id.data
@@ -0,0 +1,12 @@
+-- binary
+00 03 # class
+0a # type/masked
+04 # length
+12 34 56 78 # value
+-- python
+ofp.oxm.bsn_l3_src_class_id(0x12345678)
+-- c
+obj = of_oxm_bsn_l3_src_class_id_new(OF_VERSION_1_3);
+of_oxm_bsn_l3_src_class_id_value_set(obj, 0x12345678);
+-- java
+builder.setValue(ClassId.of(0x12345678))
diff --git a/test_data/of13/oxm_bsn_lag_id.data b/test_data/of13/oxm_bsn_lag_id.data
new file mode 100644
index 0000000..e8b2fcd
--- /dev/null
+++ b/test_data/of13/oxm_bsn_lag_id.data
@@ -0,0 +1,12 @@
+-- binary
+00 03 # class
+02 # type/masked
+04 # length
+12 34 56 78 # value
+-- python
+ofp.oxm.bsn_lag_id(0x12345678)
+-- c
+obj = of_oxm_bsn_lag_id_new(OF_VERSION_1_3);
+of_oxm_bsn_lag_id_value_set(obj, 0x12345678);
+-- java
+builder.setValue(LagId.of(0x12345678))
diff --git a/test_data/of13/oxm_in_phy_port.data b/test_data/of13/oxm_in_phy_port.data
new file mode 100644
index 0000000..32ac1ea
--- /dev/null
+++ b/test_data/of13/oxm_in_phy_port.data
@@ -0,0 +1,7 @@
+-- binary
+80 00 # class
+02 # type/masked
+04 # length
+00 00 00 2a # value
+-- python
+ofp.oxm.in_phy_port(value=42)
diff --git a/test_data/of13/oxm_in_phy_port_masked.data b/test_data/of13/oxm_in_phy_port_masked.data
new file mode 100644
index 0000000..99b0ad3
--- /dev/null
+++ b/test_data/of13/oxm_in_phy_port_masked.data
@@ -0,0 +1,8 @@
+-- binary
+80 00 # class
+03 # type/masked
+08 # length
+00 00 00 2a # value
+aa bb cc dd # mask
+-- python
+ofp.oxm.in_phy_port_masked(value=42, value_mask=0xaabbccdd)
diff --git a/test_data/of13/oxm_ipv6_dst.data b/test_data/of13/oxm_ipv6_dst.data
new file mode 100644
index 0000000..23c8fb5
--- /dev/null
+++ b/test_data/of13/oxm_ipv6_dst.data
@@ -0,0 +1,8 @@
+-- binary
+80 00 # class
+36 # type/masked
+10 # length
+00 01 02 03 04 05 06 07 # value
+08 09 0a 0b 0c 0d 0e 0f # ...
+-- python
+ofp.oxm.ipv6_dst('\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f')
diff --git a/test_data/of13/packet_in.data b/test_data/of13/packet_in.data
new file mode 100644
index 0000000..525d144
--- /dev/null
+++ b/test_data/of13/packet_in.data
@@ -0,0 +1,48 @@
+-- binary
+04 0a # version, type
+00 35 # length
+12 34 56 78 # xid
+00 00 00 64 # buffer_id
+42 68 # total_len
+01 # reason
+14 # table_id
+fe dc ba 98 76 54 32 10 # cookie
+00 01 # match.type
+00 16 # match.length
+80 00 01 08 # match.oxm_list[0].type_len - Input Port
+00 00 00 04 # match.oxm_list[0].value
+00 00 00 05 # match.oxm_list[0].mask
+80 00 2A 02 # match.oxm_list[1].type_len - ARP OpCode
+00 01 # match.oxm_list[1].value
+00 00 # match.pad
+00 00 # pad
+61 62 63 # data
+-- python
+ofp.message.packet_in(
+    xid=0x12345678,
+    buffer_id=100,
+    total_len=17000,
+    reason=ofp.OFPR_ACTION,
+    table_id=20,
+    cookie=0xFEDCBA9876543210,
+    match=ofp.match(oxm_list=[
+        ofp.oxm.in_port_masked(value=4, value_mask=5),
+        ofp.oxm.arp_op(value=1)
+        ]),
+    data="abc")
+-- java
+builder
+   .setXid(0x12345678)
+   .setBufferId(OFBufferId.of(100))
+   .setTotalLen(17000)
+   .setReason(OFPacketInReason.ACTION)
+   .setTableId(TableId.of(20))
+   .setCookie(U64.parseHex("FEDCBA9876543210"))
+   .setMatch(
+        factory.buildMatchV3()
+            .setMasked(MatchField.IN_PORT, OFPort.of(4), OFPort.of(5))
+            .setExact(MatchField.ARP_OP, ArpOpcode.REQUEST)
+            .build()
+    )
+    .setData(new byte[] { 97, 98, 99 } );
+
diff --git a/test_data/of13/packet_out.data b/test_data/of13/packet_out.data
new file mode 100644
index 0000000..ae8fa64
--- /dev/null
+++ b/test_data/of13/packet_out.data
@@ -0,0 +1,26 @@
+-- binary
+04 0d # version, type
+00 33 # length
+12 34 56 78 # xid
+00 00 00 64 # buffer_id
+00 00 00 04 # in_port
+00 18 # actions_len
+00 00 00 00 00 00 # pad
+00 00 # actions[0].type
+00 10 # actions[0].length
+00 00 00 02 # actions[0].port
+ff ff # actions[0].max_len
+00 00 00 00 00 00 # pad
+00 18 # actions[1].type
+00 08 # actions[1].length
+00 00 00 00 # pad
+61 62 63 # data
+-- python
+ofp.message.packet_out(
+    xid=0x12345678,
+    buffer_id=100,
+    in_port=4,
+    actions=[
+        ofp.action.output(port=2, max_len=0xffff),
+        ofp.action.dec_nw_ttl()],
+    data="abc")
diff --git a/test_data/of13/port_status.data b/test_data/of13/port_status.data
new file mode 100644
index 0000000..10f7c16
--- /dev/null
+++ b/test_data/of13/port_status.data
@@ -0,0 +1,36 @@
+-- binary
+04 0c # version, type
+00 50 # length
+12 34 56 78 # xid
+02 # reason
+00 00 00 00 00 00 00 # pad
+00 00 00 04 # port_no
+00 00 00 00 # pad
+01 02 03 04 05 06 # hw_addr
+00 00 # pad
+66 6f 6f 00 00 00 00 00 # name
+00 00 00 00 00 00 00 00 # ...
+00 00 00 24 # config
+00 00 00 02 # state
+00 00 00 01 # curr
+00 00 00 02 # advertised
+00 00 00 04 # supported
+00 00 00 08 # peer
+00 00 00 0a # curr_speed
+00 00 00 14 # max_speed
+-- python
+ofp.message.port_status(
+    xid=0x12345678,
+    reason=ofp.OFPPR_MODIFY,
+    desc=ofp.port_desc(
+        port_no=4,
+        hw_addr=[1,2,3,4,5,6],
+        name="foo",
+        config=ofp.OFPPC_NO_FWD|ofp.OFPPC_NO_RECV,
+        state=ofp.OFPPS_BLOCKED,
+        curr=ofp.OFPPF_10MB_HD,
+        advertised=ofp.OFPPF_10MB_FD,
+        supported=ofp.OFPPF_100MB_HD,
+        peer=ofp.OFPPF_100MB_FD,
+        curr_speed=10,
+        max_speed=20))
diff --git a/test_data/of13/set_config.data b/test_data/of13/set_config.data
new file mode 100644
index 0000000..49c5be0
--- /dev/null
+++ b/test_data/of13/set_config.data
@@ -0,0 +1,11 @@
+-- binary
+04 09 # version, type
+00 0c # length
+12 34 56 78 # xid
+00 02 # flags
+ff ff # miss_send_len
+-- python
+ofp.message.set_config(
+    xid=0x12345678,
+    flags=ofp.OFPC_FRAG_REASM,
+    miss_send_len=0xffff)
diff --git a/utest/test_build_ir.py b/utest/test_build_ir.py
new file mode 100755
index 0000000..b6d87a3
--- /dev/null
+++ b/utest/test_build_ir.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import sys
+import os
+import unittest
+
+from nose.tools import eq_, ok_, raises
+
+root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
+sys.path.insert(0, root_dir)
+
+import loxi_ir.ir as ir
+import loxi_front_end.frontend_ir as fe
+
+class BuildIRTest(unittest.TestCase):
+
+    def test_simple(self):
+        version = ir.OFVersion("1.0", 1)
+        input = fe.OFInput(filename="test.dat",
+                    wire_versions=(1,),
+                    classes=(
+                      fe.OFClass(name="OFMessage",
+                                 superclass=None,
+                                 members=(
+                                     fe.OFDataMember(name='version', oftype='uint32_t'),
+                                     fe.OFLengthMember(name='length', oftype='uint16_t')
+                                 ),
+                                 virtual=False,
+                                 params={}
+                      ),
+                    ),
+                    enums=()
+                )
+
+        p = ir.build_protocol(version, [ input ])
+        eq_(1, len(p.classes))
+        c = p.classes[0]
+        eq_("OFMessage", c.name)
+        eq_(None, c.superclass)
+        eq_(False, c.virtual)
+        eq_({}, c.params)
+        eq_(2, len(c.members))
+        eq_(p, c.protocol)
+
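+        # The IR computes member offsets from the fixed-width types declared
+        # above: 'version' (uint32_t) occupies bytes 0-3 and 'length' (uint16_t)
+        # bytes 4-5, so the class has a fixed length of 6 bytes.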
+        m1 = c.members[0]
+        ok_(isinstance(m1, ir.OFDataMember))
+        eq_("version", m1.name)
+        eq_("uint32_t", m1.oftype)
+        eq_(4, m1.length)
+        eq_(True, m1.is_fixed_length)
+        eq_(0, m1.offset)
+        eq_(c, m1.of_class)
+
+        m2 = c.members[1]
+        ok_(isinstance(m2, ir.OFLengthMember))
+        eq_("length", m2.name)
+        eq_("uint16_t", m2.oftype)
+        eq_(2, m2.length)
+        eq_(True, m2.is_fixed_length)
+        eq_(4, m2.offset)
+        eq_(c, m2.of_class)
+
+        eq_(True, c.is_fixed_length)
+        eq_(6, c.length)
+
+    def test_resolve_superclass(self):
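+        # The frontend names the superclass as a string ("OFMessage"); building
+        # the protocol should resolve that name to the actual IR class object.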
+        version = ir.OFVersion("1.0", 1)
+        input = fe.OFInput(filename="test.dat",
+                    wire_versions=(1,),
+                    classes=(
+                      fe.OFClass(name="OFMessage",
+                                 superclass=None,
+                                 members=(),
+                                 virtual=True,
+                                 params={}
+                      ),
+                      fe.OFClass(name="OFHello",
+                                 superclass="OFMessage",
+                                 members=(),
+                                 virtual=False,
+                                 params={}
+                      ),
+                    ),
+                    enums=()
+                )
+        p = ir.build_protocol(version, [ input ])
+        eq_(2, len(p.classes))
+        c, c2 = p.classes
+        eq_("OFMessage", c.name)
+        eq_(None, c.superclass)
+        eq_(True, c.virtual)
+        eq_("OFHello", c2.name)
+        eq_(c, c2.superclass)
+        eq_(False, c2.virtual)
+
+    @raises(ir.ClassNotFoundException)
+    def test_superclass_not_found(self):
+        version = ir.OFVersion("1.0", 1)
+        input = fe.OFInput(filename="test.dat",
+                    wire_versions=(1,),
+                    classes=(
+                      fe.OFClass(name="OFMessage",
+                                 superclass="NotFoundSuperClass",
+                                 members=(),
+                                 virtual=True,
+                                 params={}
+                      ),
+                   ),
+                    enums=()
+                )
+        p = ir.build_protocol(version, [ input ])
+
+
+    @raises(ir.DependencyCycleException)
+    def test_dependency_cycle(self):
+        version = ir.OFVersion("1.0", 1)
+        input = fe.OFInput(filename="test.dat",
+                    wire_versions=(1,),
+                    classes=(
+                      fe.OFClass(name="OFMessage",
+                                 superclass="OFHeader",
+                                 members=(),
+                                 virtual=True,
+                                 params={}
+                      ),
+                       fe.OFClass(name="OFHeader",
+                                 superclass="OFMessage",
+                                 members=(),
+                                 virtual=True,
+                                 params={}
+                      ),
+                   ),
+                    enums=()
+                )
+        p = ir.build_protocol(version, [ input ])
+
+    @raises(ir.RedefinedException)
+    def test_class_redefined(self):
+        version = ir.OFVersion("1.0", 1)
+        inputs = (
+            fe.OFInput(filename="test.dat",
+                    wire_versions=(1,),
+                    classes=(
+                      fe.OFClass(name="OFMessage",
+                                 superclass=None,
+                                 members=(),
+                                 virtual=True,
+                                 params={}
+                      ),
+                   ),
+                    enums=(),
+             ),
+             fe.OFInput(filename="test2.dat",
+                    wire_versions=(1,),
+                    classes=(
+                      fe.OFClass(name="OFMessage",
+                                 superclass=None,
+                                 members=(),
+                                 virtual=True,
+                                 params={}
+                      ),
+                  ),
+                    enums=()
+                )
+        )
+        p = ir.build_protocol(version, inputs)
+
+
+    def test_enums(self):
+        version = ir.OFVersion("1.0", 1)
+        input = fe.OFInput(filename="test.dat",
+                    wire_versions=(1,),
+                    classes=(),
+                    enums=(
+                        fe.OFEnum(name='ofp_flow_wildcards',
+                                  entries=(fe.OFEnumEntry(name="OFPFW_IN_PORT", value=0x01, params={}),
+                                           fe.OFEnumEntry(name="OFPFW_DL_VLAN", value=0x2, params={})),
+                                  params = dict(wire_type="uint32_t", bitmask=True)
+                                 ),
+                        fe.OFEnum(name='ofp_queue_properties',
+                                  entries=(fe.OFEnumEntry(name="OFPQT_NONE", value=0x00, params={}),
+                                           fe.OFEnumEntry(name="OFPQT_MIN_RATE", value=0x1, params={})),
+                                  params = dict(wire_type="uint32_t")
+                                 ),
+                        )
+                    )
+
+        p = ir.build_protocol(version, [ input ])
+        eq_(0, len(p.classes))
+        eq_(2, len(p.enums))
+        e = p.enums[0]
+        eq_("ofp_flow_wildcards", e.name)
+        eq_(True, e.is_bitmask)
+        eq_("uint32_t", e.wire_type)
+        eq_(ir.OFEnumEntry(name="OFPFW_IN_PORT", value=0x01, params={}), e.entries[0])
+        eq_(ir.OFEnumEntry(name="OFPFW_DL_VLAN", value=0x02, params={}), e.entries[1])
+
+        e = p.enums[1]
+        eq_("ofp_queue_properties", e.name)
+        eq_(False, e.is_bitmask)
+        eq_("uint32_t", e.wire_type)
+        eq_(ir.OFEnumEntry(name="OFPQT_NONE", value=0x00, params={}), e.entries[0])
+        eq_(ir.OFEnumEntry(name="OFPQT_MIN_RATE", value=0x01, params={}), e.entries[1])
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/utest/test_frontend.py b/utest/test_frontend.py
new file mode 100755
index 0000000..cfadc39
--- /dev/null
+++ b/utest/test_frontend.py
@@ -0,0 +1,199 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import sys
+import os
+import unittest
+
+root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
+sys.path.insert(0, root_dir)
+
+import loxi_front_end.parser as parser
+import loxi_front_end.frontend as frontend
+from loxi_front_end.frontend_ir import *
+
+class FrontendTests(unittest.TestCase):
+    maxDiff = None
+
+    def test_simple(self):
+        ast = parser.parse("""
+#version 1
+
+enum ofp_port_config {
+    OFPPC_PORT_DOWN = 0x1,
+    OFPPC_NO_STP = 0x2,
+    OFPPC_NO_RECV = 0x4,
+    OFPPC_NO_RECV_STP = 0x8,
+    OFPPC_NO_FLOOD = 0x10,
+    OFPPC_NO_FWD = 0x20,
+    OFPPC_NO_PACKET_IN = 0x40,
+};
+
+#version 2
+
+struct of_echo_reply(align=8) {
+    uint8_t version;
+    uint8_t type == 3;
+    uint16_t length;
+    uint32_t xid;
+    of_octets_t data;
+};
+
+enum ofp_queue_op_failed_code(wire_type=uint32, bitmask=False, complete=True) {
+    OFPQOFC_BAD_PORT = 0,
+    OFPQOFC_BAD_QUEUE = 1,
+    OFPQOFC_EPERM = 2,
+};
+
+struct of_packet_queue {
+    uint32_t queue_id;
+    uint16_t len;
+    pad(2);
+    list(of_queue_prop_t) properties;
+};
+""")
+
+        # Not testing the parser, just making sure the AST is what we expect
+        expected_ast = [
+            ['metadata', 'version', '1'],
+            ['enum', 'ofp_port_config', [], [
+                ['OFPPC_PORT_DOWN', [], 1],
+                ['OFPPC_NO_STP', [], 2],
+                ['OFPPC_NO_RECV', [], 4],
+                ['OFPPC_NO_RECV_STP', [], 8],
+                ['OFPPC_NO_FLOOD', [], 16],
+                ['OFPPC_NO_FWD', [], 32],
+                ['OFPPC_NO_PACKET_IN', [], 64]]],
+            ['metadata', 'version', '2'],
+            ['struct', 'of_echo_reply', [['align', '8']], None, [
+                ['data', ['scalar', 'uint8_t'], 'version'],
+                ['type', ['scalar', 'uint8_t'], 'type', 3],
+                ['data', ['scalar', 'uint16_t'], 'length'],
+                ['data', ['scalar', 'uint32_t'], 'xid'],
+                ['data', ['scalar', 'of_octets_t'], 'data']]],
+            ['enum', 'ofp_queue_op_failed_code',
+                [['wire_type', 'uint32'], ['bitmask','False'], ['complete', 'True']], [
+                ['OFPQOFC_BAD_PORT', [], 0],
+                ['OFPQOFC_BAD_QUEUE', [], 1],
+                ['OFPQOFC_EPERM', [], 2]]],
+            ['struct', 'of_packet_queue', [], None, [
+                ['data', ['scalar', 'uint32_t'], 'queue_id'],
+                ['data', ['scalar', 'uint16_t'], 'len'],
+                ['pad', 2],
+                ['data', ['list', 'list(of_queue_prop_t)'], 'properties']]],
+        ]
+        self.assertEquals(expected_ast, ast)
+
+        ofinput = frontend.create_ofinput("standard-1.0", ast)
+        self.assertEquals(set([1, 2]), ofinput.wire_versions)
+        expected_classes = [
+            OFClass(name='of_echo_reply', superclass=None, members=[
+                OFDataMember('version', 'uint8_t'), # XXX
+                OFTypeMember('type', 'uint8_t', 3),
+                OFLengthMember('length', 'uint16_t'),
+                OFDataMember('xid', 'uint32_t'),
+                OFDataMember('data', 'of_octets_t')], virtual=False,
+                params={'align': '8'}),
+            OFClass(name='of_packet_queue', superclass=None, members=[
+                OFDataMember('queue_id', 'uint32_t'),
+                OFLengthMember('len', 'uint16_t'),
+                OFPadMember(2),
+                OFDataMember('properties', 'list(of_queue_prop_t)')], virtual=False, params={}),
+        ]
+        self.assertEquals(expected_classes, ofinput.classes)
+        expected_enums = [
+            OFEnum(name='ofp_port_config', entries=[
+                OFEnumEntry('OFPPC_PORT_DOWN', 1, {}),
+                OFEnumEntry('OFPPC_NO_STP', 2, {}),
+                OFEnumEntry('OFPPC_NO_RECV', 4, {}),
+                OFEnumEntry('OFPPC_NO_RECV_STP', 8, {}),
+                OFEnumEntry('OFPPC_NO_FLOOD', 16, {}),
+                OFEnumEntry('OFPPC_NO_FWD', 32, {}),
+                OFEnumEntry('OFPPC_NO_PACKET_IN', 64, {})], params={}),
+            OFEnum(name='ofp_queue_op_failed_code', entries=[
+                OFEnumEntry('OFPQOFC_BAD_PORT', 0, {}),
+                OFEnumEntry('OFPQOFC_BAD_QUEUE', 1, {}),
+                OFEnumEntry('OFPQOFC_EPERM', 2, {})],
+                params={'wire_type': 'uint32', 'bitmask': 'False', 'complete': 'True'}),
+        ]
+        self.assertEquals(expected_enums, ofinput.enums)
+
+    def test_inheritance(self):
+        ast = parser.parse("""
+#version 1
+
+struct of_queue_prop {
+    uint16_t type == ?;
+    uint16_t len;
+    pad(4);
+};
+
+struct of_queue_prop_min_rate : of_queue_prop {
+    uint16_t type == 1;
+    uint16_t len;
+    pad(4);
+    uint16_t rate;
+    pad(6);
+};
+""")
+
+        # Not testing the parser, just making sure the AST is what we expect
+        expected_ast = [
+            ['metadata', 'version', '1'],
+
+            ['struct', 'of_queue_prop', [], None, [
+                ['discriminator', ['scalar', 'uint16_t'], 'type'],
+                ['data', ['scalar', 'uint16_t'], 'len'],
+                ['pad', 4]]],
+
+            ['struct', 'of_queue_prop_min_rate', [], 'of_queue_prop', [
+                ['type', ['scalar', 'uint16_t'], 'type', 1],
+                ['data', ['scalar', 'uint16_t'], 'len'],
+                ['pad', 4],
+                ['data', ['scalar', 'uint16_t'], 'rate'],
+                ['pad', 6]]],
+        ]
+        self.assertEquals(expected_ast, ast)
+
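+        # 'type == ?' declares a discriminator, so of_queue_prop becomes a
+        # virtual base class; 'type == 1' pins the value, making
+        # of_queue_prop_min_rate a concrete subclass (compare expected_classes
+        # below).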
+        ofinput = frontend.create_ofinput("standard-1.0", ast)
+        expected_classes = [
+            OFClass(name='of_queue_prop', superclass=None, members=[
+                OFDiscriminatorMember('type', 'uint16_t'),
+                OFLengthMember('len', 'uint16_t'),
+                OFPadMember(4)], virtual=True, params={}),
+            OFClass(name='of_queue_prop_min_rate', superclass='of_queue_prop', members= [
+                OFTypeMember('type', 'uint16_t', 1),
+                OFLengthMember('len', 'uint16_t'),
+                OFPadMember(4),
+                OFDataMember('rate', 'uint16_t'),
+                OFPadMember(6)], virtual=False, params= {}),
+        ]
+        self.assertEquals(expected_classes, ofinput.classes)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/utest/test_generic_utils.py b/utest/test_generic_utils.py
new file mode 100755
index 0000000..58e3a4a
--- /dev/null
+++ b/utest/test_generic_utils.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import sys
+import os
+import unittest
+
+root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
+sys.path.insert(0, root_dir)
+
+from generic_utils import *
+
+class MyHash(object):
+    def __init__(self, val):
+        self.val = val
+
+    def __hash__(self):
+        return hash(self.val)
+
+    def __str__(self):
+        return "BoringConstantString"
+
+    def __eq__(self, o):
+        return type(self) == type(o) and self.val == o.val
+
+class GenericTest(unittest.TestCase):
+    def test_memoize_simple(self):
+        self.count = 0
+
+        @memoize
+        def function():
+            self.count += 1
+            return "Foo"
+
+        self.assertEquals(0, self.count)
+        self.assertEquals("Foo", function())
+        self.assertEquals(1, self.count)
+        self.assertEquals("Foo", function())
+        self.assertEquals(1, self.count)
+
+    def test_memoize_string_args(self):
+        self.count = 0
+
+        @memoize
+        def function(a, b):
+            self.count += 1
+            return "%s:%s" % (a,b)
+
+        self.assertEquals(0, self.count)
+        self.assertEquals("a:b", function('a', 'b'))
+        self.assertEquals(1, self.count)
+        self.assertEquals("ab:", function('ab', ''))
+        self.assertEquals(2, self.count)
+        self.assertEquals("ab:", function('ab', ''))
+        self.assertEquals(2, self.count)
+
+    def test_memoize_kw_args(self):
+        self.count = 0
+
+        @memoize
+        def function(**kw):
+            self.count += 1
+            return ",".join("{k}={v}".format(k=k,v=v) for k,v in kw.items())
+
+        self.assertEquals(0, self.count)
+        self.assertEquals("a=1", function(a=1))
+        self.assertEquals(1, self.count)
+        self.assertEquals("a=1,b=2", function(a=1, b=2))
+        self.assertEquals(2, self.count)
+        self.assertEquals("a=1", function(a=1))
+        self.assertEquals(2, self.count)
+        self.assertEquals("a=1,b=BoringConstantString", function(a=1, b=MyHash('1')))
+        self.assertEquals(3, self.count)
+
+    def test_memoize_with_hashable_object(self):
+        self.count = 0
+
+        @memoize
+        def function(a):
+            self.count += 1
+            return a.val
+
+        self.assertEquals(0, self.count)
+        self.assertEquals("a", function(MyHash('a')))
+        self.assertEquals(1, self.count)
+        self.assertEquals("b", function(MyHash('b')))
+        self.assertEquals(2, self.count)
+        self.assertEquals("a", function(MyHash('a')))
+        self.assertEquals(2, self.count)
+
+if __name__ == '__main__':
+    unittest.main()
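
The tests above pin down the memoize contract: results are cached per call, the cache key covers both positional and keyword arguments, and any hashable object (such as MyHash) can take part in the key. A minimal sketch that satisfies these tests, purely for illustration and not the actual generic_utils implementation:

import functools

def memoize_sketch(fn):
    """Cache fn's results per (args, kwargs); assumes every argument is hashable."""
    cache = {}

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        key = (args, frozenset(kwargs.items()))
        if key not in cache:
            cache[key] = fn(*args, **kwargs)
        return cache[key]
    return wrapper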
diff --git a/utest/test_parser.py b/utest/test_parser.py
index 33ba545..8acfdda 100755
--- a/utest/test_parser.py
+++ b/utest/test_parser.py
@@ -26,7 +26,13 @@
 # EPL for the specific language governing permissions and limitations
 # under the EPL.
 
+import sys
+import os
 import unittest
+
+root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
+sys.path.insert(0, root_dir)
+
 import pyparsing
 import loxi_front_end.parser as parser
 
@@ -36,7 +42,7 @@
 struct foo { };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(), [['struct', 'foo', []]])
+        self.assertEquals(ast, [['struct', 'foo', [], None, []]])
 
     def test_one_field(self):
         src = """\
@@ -45,8 +51,18 @@
 };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(),
-            [['struct', 'foo', [['uint32_t', 'bar']]]])
+        self.assertEquals(ast,
+            [['struct', 'foo', [], None, [['data', ['scalar', 'uint32_t'], 'bar']]]])
+
+    def test_struct_align_arg(self):
+        src = """\
+struct foo(align=8) {
+    uint32_t bar;
+};
+"""
+        ast = parser.parse(src)
+        self.assertEquals(ast,
+            [['struct', 'foo', [['align', '8']], None, [['data', ['scalar', 'uint32_t'], 'bar']]]])
 
     def test_multiple_fields(self):
         src = """\
@@ -57,11 +73,11 @@
 };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(),
-            [['struct', 'foo',
-                [['uint32_t', 'bar'],
-                 ['uint8_t', 'baz'],
-                 ['uint64_t', 'abc']]]])
+        self.assertEquals(ast,
+            [['struct', 'foo', [], None,
+                [['data', ['scalar', 'uint32_t'], 'bar'],
+                 ['data', ['scalar', 'uint8_t'], 'baz'],
+                 ['data', ['scalar', 'uint64_t'], 'abc']]]])
 
     def test_array_type(self):
         src = """\
@@ -70,8 +86,8 @@
 };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(),
-            [['struct', 'foo', [['uint32_t[4]', 'bar']]]])
+        self.assertEquals(ast,
+            [['struct', 'foo', [], None, [['data', ['array', 'uint32_t[4]'], 'bar']]]])
 
     def test_list_type(self):
         src = """\
@@ -80,8 +96,48 @@
 };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(),
-            [['struct', 'foo', [['list(of_action_t)', 'bar']]]])
+        self.assertEquals(ast,
+            [['struct', 'foo', [], None, [['data', ['list', 'list(of_action_t)'], 'bar']]]])
+
+    def test_pad_member(self):
+        src = """\
+struct foo {
+    pad(1);
+};
+"""
+        ast = parser.parse(src)
+        self.assertEquals(ast,
+            [['struct', 'foo', [], None, [['pad', 1]]]])
+
+    def test_type_member(self):
+        src = """\
+struct foo {
+    uint16_t foo == 0x10;
+};
+"""
+        ast = parser.parse(src)
+        self.assertEquals(ast,
+            [['struct', 'foo', [], None, [['type', ['scalar', 'uint16_t'], 'foo', 0x10]]]])
+
+    def test_inheritance(self):
+        src = """\
+struct foo : bar {
+    uint16_t foo == 0x10;
+};
+"""
+        ast = parser.parse(src)
+        self.assertEquals(ast,
+            [['struct', 'foo', [], 'bar', [['type', ['scalar', 'uint16_t'], 'foo', 0x10]]]])
+
+    def test_discriminator(self):
+        src = """\
+struct foo {
+    uint16_t foo == ?;
+};
+"""
+        ast = parser.parse(src)
+        self.assertEquals(ast,
+            [['struct', 'foo', [], None, [['discriminator', ['scalar', 'uint16_t'], 'foo']]]])
 
 class EnumTests(unittest.TestCase):
     def test_empty(self):
@@ -90,7 +146,7 @@
 };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(), [['enum', 'foo', []]])
+        self.assertEquals(ast, [['enum', 'foo', [], []]])
 
     def test_one(self):
         src = """\
@@ -99,7 +155,18 @@
 };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(), [['enum', 'foo', [['BAR', 1]]]])
+        self.assertEquals(ast, [['enum', 'foo', [], [['BAR', [], 1]]]])
+
+    def test_params(self):
+        src = """\
+enum foo(wire_type=uint32, bitmask=False, complete=False) {
+    BAR = 1
+};
+"""
+        ast = parser.parse(src)
+        self.assertEquals(ast, [['enum', 'foo',
+            [['wire_type', 'uint32'], ['bitmask', 'False'], ['complete', 'False']],
+            [['BAR', [], 1]]]])
 
     def test_multiple(self):
         src = """\
@@ -110,7 +177,7 @@
 };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(), [['enum', 'foo', [['OFP_A', 1], ['OFP_B', 2], ['OFP_C', 3]]]])
+        self.assertEquals(ast, [['enum', 'foo', [], [['OFP_A', [], 1], ['OFP_B', [], 2], ['OFP_C', [], 3]]]])
 
     def test_trailing_comma(self):
         src = """\
@@ -121,7 +188,7 @@
 };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(), [['enum', 'foo', [['OFP_A', 1], ['OFP_B', 2], ['OFP_C', 3]]]])
+        self.assertEquals(ast, [['enum', 'foo', [], [['OFP_A', [], 1], ['OFP_B', [], 2], ['OFP_C', [], 3]]]])
 
 class TestMetadata(unittest.TestCase):
     def test_version(self):
@@ -129,7 +196,7 @@
 #version 1
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(), [['metadata', 'version', '1']])
+        self.assertEquals(ast, [['metadata', 'version', '1']])
 
 class TestToplevel(unittest.TestCase):
     def test_multiple_structs(self):
@@ -138,22 +205,22 @@
 struct bar { };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(),
-            [['struct', 'foo', []], ['struct', 'bar', []]])
+        self.assertEquals(ast,
+            [['struct', 'foo', [], None, []], ['struct', 'bar', [], None, []]])
 
     def test_comments(self):
         src = """\
 // comment 1
 struct foo { //comment 2
 // comment 3
-   uint32_t a; //comment 5 
+   uint32_t a; //comment 5
 // comment 6
 };
 // comment 4
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(),
-            [['struct', 'foo', [['uint32_t', 'a']]]])
+        self.assertEquals(ast,
+            [['struct', 'foo', [], None, [['data', ['scalar', 'uint32_t'], 'a']]]])
 
     def test_mixed(self):
         src = """\
@@ -163,11 +230,11 @@
 struct bar { };
 """
         ast = parser.parse(src)
-        self.assertEquals(ast.asList(),
+        self.assertEquals(ast,
             [['metadata', 'version', '1'],
-             ['struct', 'foo', []],
+             ['struct', 'foo', [], None, []],
              ['metadata', 'version', '2'],
-             ['struct', 'bar', []]])
+             ['struct', 'bar', [], None, []]])
 
 class TestErrors(unittest.TestCase):
     def syntax_error(self, src, regex):
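
The updated expectations reflect the parser's richer AST: structs now carry a parameter list and an optional superclass, enums carry parameters plus per-entry parameter lists, and members are tagged by kind. A short summary of the shapes these tests exercise, together with one assertion lifted directly from test_inheritance:

# Node shapes (illustrative summary of the expectations above):
#   ['struct', name, params, superclass_or_None, members]
#   ['enum',   name, params, [[entry_name, entry_params, value], ...]]
#   ['metadata', 'version', '1']
# Member forms:
#   ['data',          ['scalar'|'array'|'list', type_str], name]
#   ['pad',           length]
#   ['type',          ['scalar', type_str], name, value]
#   ['discriminator', ['scalar', type_str], name]

src = """\
struct foo : bar {
    uint16_t foo == 0x10;
};
"""
assert parser.parse(src) == [
    ['struct', 'foo', [], 'bar',
     [['type', ['scalar', 'uint16_t'], 'foo', 0x10]]]]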
diff --git a/utest/test_test_data.py b/utest/test_test_data.py
new file mode 100755
index 0000000..d0ed683
--- /dev/null
+++ b/utest/test_test_data.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import sys
+import os
+import unittest
+
+root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
+sys.path.insert(0, root_dir)
+
+import test_data
+
+class DataFileTests(unittest.TestCase):
+    def test_example(self):
+        self.assertTrue('./example.data' in test_data.list_files())
+        data = test_data.read('example.data')
+        self.assertEquals(sorted(['section1', 'section2', 'binary']), sorted(data.keys()))
+        self.assertEquals(' abc def\nghi', data['section1'])
+        self.assertEquals('123\n456\n789', data['section2'])
+        self.assertEquals('\x00\x01\x02\x03\x04\x05\x06\x07\x77\x66\x55\x44\x33\x22\x11\x00',
+                          data['binary'])
+
+    # Just make sure all included data files parse without exceptions
+    def test_all(self):
+        for name in test_data.list_files():
+            test_data.read(name)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/wireshark_gen/__init__.py b/wireshark_gen/__init__.py
new file mode 100644
index 0000000..a610f3f
--- /dev/null
+++ b/wireshark_gen/__init__.py
@@ -0,0 +1,123 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+import os
+from collections import namedtuple
+import loxi_utils.loxi_utils as utils
+import loxi_front_end
+import loxi_globals
+from loxi_ir import *
+import field_info
+import template_utils
+
+templates_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
+
+DissectorField = namedtuple("DissectorField", ["fullname", "name", "type", "base", "enum_table"])
+
+proto_names = { 1: 'of10', 2: 'of11', 3: 'of12', 4: 'of13' }
+def make_field_name(version, ofclass_name, member_name):
+    return "%s.%s.%s" % (proto_names[version.wire_version],
+                         ofclass_name[3:],
+                         member_name)
+
+def get_reader(version, cls, m):
+    """
+    Decide on a reader function to use for the given field
+    """
+    ofproto = loxi_globals.ir[version]
+    enum = ofproto.enum_by_name(m.oftype)
+    if enum and 'wire_type' in enum.params:
+        return "read_" + enum.params['wire_type']
+    elif (cls.name, m.name) in field_info.reader_overrides:
+        return field_info.reader_overrides[(cls.name, m.name)]
+    else:
+        return "read_" + m.oftype.replace(')', '').replace('(', '_')
+
+def get_field_info(version, cls, name, oftype):
+    """
+    Decide on the Wireshark type, base, and enum table for a given field.
+
+    Returns (type, base, enum_table)
+    """
+    if oftype.startswith("list"):
+        return "bytes", "NONE", "nil"
+
+    ofproto = loxi_globals.ir[version]
+
+    enum = ofproto.enum_by_name(oftype)
+    if not enum and (cls, name) in field_info.class_field_to_enum:
+        enum_name = field_info.class_field_to_enum[(cls, name)]
+        enum = ofproto.enum_by_name(enum_name)
+
+    if enum:
+        field_type = "uint32"
+    elif oftype in field_info.oftype_to_wireshark_type:
+        field_type = field_info.oftype_to_wireshark_type[oftype]
+    else:
+        print "WARN missing oftype_to_wireshark_type for", oftype
+        field_type = "bytes"
+
+    if enum:
+        if enum.is_bitmask:
+            field_base = "HEX"
+        else:
+            field_base = "DEC"
+    elif name in field_info.field_to_base:
+        field_base = field_info.field_to_base[name]
+    elif oftype in field_info.oftype_to_base:
+        field_base = field_info.oftype_to_base[oftype]
+    else:
+        print "WARN missing oftype_to_base for", oftype
+        field_base = "NONE"
+
+    if enum:
+        enum_table = 'enum_v%d_%s' % (version.wire_version, enum.name)
+    else:
+        enum_table = 'nil'
+
+    return field_type, field_base, enum_table
+
+def create_fields():
+    r = []
+    for version, ofproto in loxi_globals.ir.items():
+        for ofclass in ofproto.classes:
+            for m in ofclass.members:
+                if isinstance(m, OFPadMember):
+                    continue
+                fullname = make_field_name(version, ofclass.name, m.name)
+                field_type, field_base, enum_table = get_field_info(version, ofclass.name, m.name, m.oftype)
+                r.append(DissectorField(fullname, m.name, field_type, field_base, enum_table))
+
+    return r
+
+def generate(install_dir):
+    context = {
+        'fields': create_fields(),
+    }
+
+    with template_utils.open_output(install_dir, 'wireshark/openflow.lua') as out:
+        template_utils.render_template(out, "openflow.lua", [templates_dir], context)
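
To make the DissectorField plumbing concrete: for OpenFlow 1.3 (wire version 4), a member such as of_flow_mod.table_id is mapped through class_field_to_enum to ofp_table, so get_field_info() renders it as a uint32 field with a per-version enum lookup table. A hypothetical example of the resulting tuple, assuming ofp_table is present in the 1.3 input and is not a bitmask:

from collections import namedtuple

# Same shape as the DissectorField namedtuple defined above.
DissectorField = namedtuple("DissectorField",
                            ["fullname", "name", "type", "base", "enum_table"])

example = DissectorField(
    fullname='of13.flow_mod.table_id',  # proto_names[4] + class name minus "of_" + member name
    name='table_id',
    type='uint32',                      # enum-backed fields are emitted as uint32
    base='DEC',                         # non-bitmask enums use base DEC; bitmasks use HEX
    enum_table='enum_v4_ofp_table',     # per-version enum table referenced from the Lua template
)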
diff --git a/wireshark_gen/field_info.py b/wireshark_gen/field_info.py
new file mode 100644
index 0000000..996accf
--- /dev/null
+++ b/wireshark_gen/field_info.py
@@ -0,0 +1,180 @@
+# Copyright 2013, Big Switch Networks, Inc.
+#
+# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+# the following special exception:
+#
+# LOXI Exception
+#
+# As a special exception to the terms of the EPL, you may distribute libraries
+# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+# that copyright and licensing notices generated by LoxiGen are not altered or removed
+# from the LoxiGen Libraries and the notice provided below is (i) included in
+# the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+# documentation for the LoxiGen Libraries, if distributed in binary form.
+#
+# Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+#
+# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+# a copy of the EPL at:
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# EPL for the specific language governing permissions and limitations
+# under the EPL.
+
+# Map from LOXI types to Wireshark types
+oftype_to_wireshark_type = {
+    "char": "int8",
+    "uint8_t": "uint8",
+    "uint16_t": "uint16",
+    "uint32_t": "uint32",
+    "uint64_t": "uint64",
+    "of_mac_addr_t": "ether",
+    "of_ipv4_t": "ipv4",
+    "of_ipv6_t": "ipv6",
+    "of_port_name_t": "stringz",
+    "of_table_name_t": "stringz",
+    "of_desc_str_t": "stringz",
+    "of_serial_num_t": "stringz",
+    "of_octets_t": "bytes",
+    "of_port_no_t": "uint32",
+    "of_port_desc_t": "stringz",
+    "of_bsn_vport_t": "bytes",
+    "of_bsn_vport_q_in_q_t": "bytes",
+    "of_fm_cmd_t": "uint16",
+    "of_wc_bmap_t": "uint64",
+    "of_match_bmap_t": "uint64",
+    "of_match_t": "bytes",
+    "of_oxm_t": "bytes",
+    "of_meter_features_t": "bytes",
+    "of_bitmap_128_t": "bytes",
+}
+
+# Map from LOXI type to Wireshark base
+oftype_to_base = {
+    "char": "DEC",
+    "uint8_t": "DEC",
+    "uint16_t": "DEC",
+    "uint32_t": "DEC",
+    "uint64_t": "DEC",
+    "of_mac_addr_t": "NONE",
+    "of_ipv4_t": "NONE",
+    "of_ipv6_t": "NONE",
+    "of_port_name_t": "NONE",
+    "of_table_name_t": "NONE",
+    "of_desc_str_t": "NONE",
+    "of_serial_num_t": "NONE",
+    "of_octets_t": "NONE",
+    "of_port_no_t": "DEC",
+    "of_port_desc_t": "NONE",
+    "of_bsn_vport_t": "NONE",
+    "of_bsn_vport_q_in_q_t": "NONE",
+    "of_fm_cmd_t": "DEC",
+    "of_wc_bmap_t": "HEX",
+    "of_match_bmap_t": "HEX",
+    "of_match_t": "NONE",
+    "of_oxm_t": "NONE",
+    "of_meter_features_t": "NONE",
+    "of_bitmap_128_t": "NONE",
+}
+
+# Use enums for certain fields where it isn't specified in the LOXI input
+class_field_to_enum = {
+    ('of_flow_mod', 'type'): 'ofp_type',
+    ('of_error_msg', 'type'): 'ofp_type',
+    ('of_stats_request', 'type'): 'ofp_type',
+    ('of_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_stats_request', 'flags'): 'ofp_stats_request_flags',
+    ('of_stats_reply', 'type'): 'ofp_type',
+    ('of_stats_reply', 'stats_type'): 'ofp_stats_type',
+    ('of_stats_reply', 'flags'): 'ofp_stats_reply_flags',
+    ('of_flow_mod', 'table_id'): 'ofp_table',
+    ('of_flow_mod', '_command'): 'ofp_flow_mod_command',
+    ('of_flow_mod', 'out_port'): 'ofp_port',
+    ('of_flow_mod', 'out_group'): 'ofp_group',
+    ('of_error_msg', 'err_type'): 'ofp_error_type',
+    ('of_port_mod', 'type'): 'ofp_type',
+    ('of_hello', 'type'): 'ofp_type',
+    ('of_features_request', 'type'): 'ofp_type',
+    ('of_features_reply', 'type'): 'ofp_type',
+    ('of_barrier_request', 'type'): 'ofp_type',
+    ('of_barrier_reply', 'type'): 'ofp_type',
+    ('of_echo_request', 'type'): 'ofp_type',
+    ('of_echo_reply', 'type'): 'ofp_type',
+    ('of_flow_delete', 'type'): 'ofp_type',
+    ('of_flow_add', 'type'): 'ofp_type',
+    ('of_port_status', 'type'): 'ofp_type',
+    ('of_match_v3', 'type'): 'ofp_match_type',
+    ('of_action_set_nw_ttl', 'type'): 'ofp_action_type',
+    ('of_action_set_field', 'type'): 'ofp_action_type',
+    ('of_action_output', 'type'): 'ofp_action_type',
+    ('of_action_group', 'type'): 'ofp_action_type',
+    ('of_action_id', 'type'): 'ofp_action_type',
+    ('of_instruction_apply_actions', 'type'): 'ofp_instruction_type',
+    ('of_instruction_write_actions', 'type'): 'ofp_instruction_type',
+    ('of_group_mod', 'group_type'): 'ofp_group_type',
+    ('of_group_mod', 'type'): 'ofp_type',
+    ('of_group_mod', 'command'): 'ofp_group_mod_command',
+    ('of_group_mod', 'group_id'): 'ofp_group',
+    ('of_packet_out', 'type'): 'ofp_type',
+    ('of_packet_in', 'type'): 'ofp_type',
+    ('of_packet_in', 'reason'): 'ofp_packet_in_reason',
+    ('of_flow_stats_request', 'type'): 'ofp_type',
+    ('of_flow_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_desc_stats_request', 'type'): 'ofp_type',
+    ('of_desc_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_queue_stats_request', 'type'): 'ofp_type',
+    ('of_queue_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_port_stats_request', 'type'): 'ofp_type',
+    ('of_port_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_table_stats_request', 'type'): 'ofp_type',
+    ('of_table_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_port_desc_stats_request', 'type'): 'ofp_type',
+    ('of_port_desc_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_meter_stats_request', 'type'): 'ofp_type',
+    ('of_meter_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_meter_features_stats_request', 'type'): 'ofp_type',
+    ('of_meter_features_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_meter_config_stats_request', 'type'): 'ofp_type',
+    ('of_meter_config_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_group_stats_request', 'type'): 'ofp_type',
+    ('of_group_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_group_features_stats_request', 'type'): 'ofp_type',
+    ('of_group_features_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_group_desc_stats_request', 'type'): 'ofp_type',
+    ('of_group_desc_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_aggregate_stats_request', 'type'): 'ofp_type',
+    ('of_aggregate_stats_request', 'stats_type'): 'ofp_stats_type',
+    ('of_async_get_request', 'type'): 'ofp_type',
+    ('of_flow_stats_reply', 'type'): 'ofp_type',
+    ('of_flow_stats_reply', 'stats_type'): 'ofp_stats_type',
+    ('of_desc_stats_reply', 'type'): 'ofp_type',
+    ('of_desc_stats_reply', 'stats_type'): 'ofp_stats_type',
+    ('of_queue_stats_reply', 'type'): 'ofp_type',
+    ('of_queue_stats_reply', 'stats_type'): 'ofp_stats_type',
+    ('of_port_stats_reply', 'type'): 'ofp_type',
+    ('of_port_stats_reply', 'stats_type'): 'ofp_stats_type',
+    ('of_table_stats_reply', 'type'): 'ofp_type',
+    ('of_table_stats_reply', 'stats_type'): 'ofp_stats_type',
+    ('of_port_desc_stats_reply', 'type'): 'ofp_type',
+    ('of_port_desc_stats_reply', 'stats_type'): 'ofp_stats_type',
+    ('of_aggregate_stats_reply', 'type'): 'ofp_type',
+    ('of_aggregate_stats_reply', 'stats_type'): 'ofp_stats_type',
+}
+
+# Override oftype_to_base for certain field names
+field_to_base = {
+    "eth_type": "HEX",
+    "cookie": "HEX",
+    "datapath_id": "HEX",
+}
+
+reader_overrides = {
+    ("of_packet_in", "data"): "read_ethernet",
+    ("of_packet_out", "data"): "read_ethernet",
+    ("of_bsn_pdu_tx_request", "data"): "read_ethernet",
+    ("of_bsn_pdu_rx_request", "data"): "read_ethernet",
+}
diff --git a/wireshark_gen/templates/.gitignore b/wireshark_gen/templates/.gitignore
new file mode 100644
index 0000000..c3ed10e
--- /dev/null
+++ b/wireshark_gen/templates/.gitignore
@@ -0,0 +1 @@
+*.cache
diff --git a/py_gen/templates/_unpack_packet_out.py b/wireshark_gen/templates/_copyright.lua
similarity index 63%
rename from py_gen/templates/_unpack_packet_out.py
rename to wireshark_gen/templates/_copyright.lua
index b97e829..8700ad3 100644
--- a/py_gen/templates/_unpack_packet_out.py
+++ b/wireshark_gen/templates/_copyright.lua
@@ -25,16 +25,18 @@
 :: # EPL for the specific language governing permissions and limitations
 :: # under the EPL.
 ::
-        version = struct.unpack_from('!B', buf, 0)[0]
-        assert(version == const.OFP_VERSION)
-        type = struct.unpack_from('!B', buf, 1)[0]
-        assert(type == const.OFPT_PACKET_OUT)
-        _length = struct.unpack_from('!H', buf, 2)[0]
-        assert(_length == len(buf))
-        if _length < 16: raise loxi.ProtocolError("packet_out length is %d, should be at least 16" % _length)
-        obj.xid = struct.unpack_from('!L', buf, 4)[0]
-        obj.buffer_id = struct.unpack_from('!L', buf, 8)[0]
-        obj.in_port = struct.unpack_from('!H', buf, 12)[0]
-        actions_len = struct.unpack_from('!H', buf, 14)[0]
-        obj.actions = action.unpack_list(buffer(buf, 16, actions_len))
-        obj.data = str(buffer(buf, 16+actions_len))
+-- Copyright 2013, Big Switch Networks, Inc. This library was generated
+-- by the LoxiGen Compiler.
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU General Public License as published by
+-- the Free Software Foundation, either version 2 of the License, or
+-- (at your option) any later version.
+-- 
+-- This program is distributed in the hope that it will be useful,
+-- but WITHOUT ANY WARRANTY; without even the implied warranty of
+-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+-- GNU General Public License for more details.
+-- 
+-- You should have received a copy of the GNU General Public License
+-- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/wireshark_gen/templates/_ofclass_dissector.lua b/wireshark_gen/templates/_ofclass_dissector.lua
new file mode 100644
index 0000000..d327b49
--- /dev/null
+++ b/wireshark_gen/templates/_ofclass_dissector.lua
@@ -0,0 +1,78 @@
+:: # Copyright 2013, Big Switch Networks, Inc.
+:: #
+:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+:: # the following special exception:
+:: #
+:: # LOXI Exception
+:: #
+:: # As a special exception to the terms of the EPL, you may distribute libraries
+:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+:: #
+:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+:: #
+:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+:: # a copy of the EPL at:
+:: #
+:: # http://www.eclipse.org/legal/epl-v10.html
+:: #
+:: # Unless required by applicable law or agreed to in writing, software
+:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+:: # EPL for the specific language governing permissions and limitations
+:: # under the EPL.
+::
+:: from loxi_ir import *
+:: from wireshark_gen import make_field_name, get_reader
+:: attrs = []
+:: if ofclass.virtual: attrs.append("virtual")
+:: if ofclass.superclass: attrs.append("child")
+:: if not ofclass.superclass: attrs.append("top-level")
+-- ${' '.join(attrs)} class ${ofclass.name}
+:: if ofclass.superclass:
+-- Child of ${ofclass.superclass.name}
+:: #endif
+:: if ofclass.virtual:
+-- Discriminator is ${ofclass.discriminator.name}
+:: #endif
+function ${name}(reader, subtree)
+:: field_length_members = []
+:: if ofclass.virtual:
+    return ${ofclass.name}_v${version.wire_version}_dissectors[reader.peek(${ofclass.discriminator.offset},${ofclass.discriminator.length}):uint()](reader, subtree)
+:: else:
+:: if not ofclass.is_fixed_length:
+    local _length = reader.peek(${ofclass.length_member.offset}, ${ofclass.length_member.base_length}):uint()
+    local orig_reader = reader
+    reader = orig_reader.slice(_length)
+:: #endif
+:: for m in ofclass.members:
+:: if isinstance(m, OFPadMember):
+    reader.skip(${m.length})
+:: continue
+:: #endif
+:: if isinstance(m, OFFieldLengthMember):
+    local _${m.field_name}_length = reader.peek(0, ${m.base_length}):uint()
+:: field_length_members.append(m.field_name)
+:: #endif
+:: if m.oftype.startswith("list"):
+:: class_name = m.oftype.replace('_t)', '').replace('(', '').replace('list', '')
+:: if m.name in field_length_members:
+    read_list(reader.slice(_${m.name}_length), dissect_${class_name}_v${version.wire_version}, subtree, '${class_name}')
+:: else:
+    read_list(reader, dissect_${class_name}_v${version.wire_version}, subtree, '${class_name}')
+:: #endif
+:: if ofclass.has_external_alignment:
+    orig_reader.skip_align()
+:: #endif
+:: else:
+:: field_name = make_field_name(version, ofclass.name, m.name)
+:: reader_name = get_reader(version, ofclass, m)
+    ${reader_name}(reader, ${version.wire_version}, subtree, '${field_name}')
+:: #endif
+:: #endfor
+    return '${ofclass.name}'
+:: #endif
+end
diff --git a/wireshark_gen/templates/_ofreader.lua b/wireshark_gen/templates/_ofreader.lua
new file mode 100644
index 0000000..a4c57be
--- /dev/null
+++ b/wireshark_gen/templates/_ofreader.lua
@@ -0,0 +1,72 @@
+:: # Copyright 2013, Big Switch Networks, Inc.
+:: #
+:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+:: # the following special exception:
+:: #
+:: # LOXI Exception
+:: #
+:: # As a special exception to the terms of the EPL, you may distribute libraries
+:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+:: #
+:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+:: #
+:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+:: # a copy of the EPL at:
+:: #
+:: # http://www.eclipse.org/legal/epl-v10.html
+:: #
+:: # Unless required by applicable law or agreed to in writing, software
+:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+:: # EPL for the specific language governing permissions and limitations
+:: # under the EPL.
+::
+OFReader = {}
+OFReader.new = function(buf, offset)
+    local self = {}
+    offset = offset or 0
+
+    self.read = function(len)
+        local r = buf(offset, len)
+        offset = offset + len
+        return r
+    end
+
+    self.read_all = function()
+        local r = buf(offset, buf:len() - offset)
+        offset = buf:len()
+        return r
+    end
+
+    self.peek = function(off, len)
+        return buf(offset + off, len)
+    end
+
+    self.peek_all = function(off)
+        return buf(offset + off, buf:len() - offset - off)
+    end
+
+    self.skip = function(len)
+        offset = offset + len
+    end
+
+    self.is_empty = function()
+        return offset == buf:len()
+    end
+
+    self.slice = function(len)
+        local r = OFReader.new(buf(offset, len))
+        offset = offset + len
+        return r
+    end
+    
+    self.skip_align = function()
+        offset = math.floor((offset + 7)/8)*8
+    end
+
+    return self
+end
diff --git a/wireshark_gen/templates/_oftype_readers.lua b/wireshark_gen/templates/_oftype_readers.lua
new file mode 100644
index 0000000..f1de7ec
--- /dev/null
+++ b/wireshark_gen/templates/_oftype_readers.lua
@@ -0,0 +1,175 @@
+:: # Copyright 2013, Big Switch Networks, Inc.
+:: #
+:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+:: # the following special exception:
+:: #
+:: # LOXI Exception
+:: #
+:: # As a special exception to the terms of the EPL, you may distribute libraries
+:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+:: #
+:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+:: #
+:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+:: # a copy of the EPL at:
+:: #
+:: # http://www.eclipse.org/legal/epl-v10.html
+:: #
+:: # Unless required by applicable law or agreed to in writing, software
+:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+:: # EPL for the specific language governing permissions and limitations
+:: # under the EPL.
+
+function read_scalar(reader, subtree, field_name, length)
+    subtree:add(fields[field_name], reader.read(length))
+end
+
+function read_uint8_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 1)
+end
+
+function read_uint16_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 2)
+end
+
+function read_uint32_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 4)
+end
+
+function read_uint64_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 8)
+end
+
+function read_of_bitmap_128_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 16)
+end
+
+function read_of_checksum_128_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 16)
+end
+
+function read_of_octets_t(reader, version, subtree, field_name)
+    if not reader.is_empty() then
+        subtree:add(fields[field_name], reader.read_all())
+    end
+end
+
+function read_list_of_hello_elem_t(reader, version, subtree, field_name)
+    -- TODO
+end
+
+function read_of_match_t(reader, version, subtree, field_name)
+    if version == 1 then
+        dissect_of_match_v1_v1(reader, subtree:add("of_match"))
+    elseif version == 2 then
+        dissect_of_match_v2_v2(reader, subtree:add("of_match"))
+    elseif version >= 3 then
+        dissect_of_match_v3_v3(reader, subtree:add("of_match"))
+    end
+end
+
+function read_of_wc_bmap_t(reader, version, subtree, field_name)
+    if version <= 2 then
+        read_scalar(reader, subtree, field_name, 4)
+    else
+        read_scalar(reader, subtree, field_name, 8)
+    end
+end
+
+function read_of_port_no_t(reader, version, subtree, field_name)
+    if version == 1 then
+        read_scalar(reader, subtree, field_name, 2)
+    else
+        read_scalar(reader, subtree, field_name, 4)
+    end
+end
+
+function read_of_port_name_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 16)
+end
+
+function read_of_mac_addr_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 6)
+end
+
+function read_of_ipv4_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 4)
+end
+
+function read_of_ipv6_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 16)
+end
+
+function read_of_fm_cmd_t(reader, version, subtree, field_name)
+    if version == 1 then
+        read_scalar(reader, subtree, field_name, 2)
+    else
+        read_scalar(reader, subtree, field_name, 1)
+    end
+end
+
+function read_of_desc_str_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 256)
+end
+
+function read_of_serial_num_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 32)
+end
+
+function read_of_table_name_t(reader, version, subtree, field_name)
+    read_scalar(reader, subtree, field_name, 32)
+end
+
+function read_of_port_desc_t(reader, version, subtree, field_name)
+    if reader.is_empty() then
+        return
+    end
+    local child_subtree = subtree:add(fields[field_name], reader.peek_all(0))
+    local info = of_port_desc_dissectors[version](reader, child_subtree)
+    child_subtree:set_text(info)
+end
+
+function read_of_oxm_t(reader, version, subtree, field_name)
+    if reader.is_empty() then
+        return
+    end
+    local child_subtree = subtree:add(fields[field_name], reader.peek_all(0))
+    local info = of_oxm_dissectors[version](reader, child_subtree)
+    child_subtree:set_text(info)
+end
+
+function read_list(reader, dissector, subtree, field_name)
+    if not reader.is_empty() then
+        local list_subtree = subtree:add(field_name .. " list", reader.peek_all(0))
+        while not reader.is_empty() do
+            local atom_subtree = list_subtree:add(field_name, reader.peek_all(0))
+            local info = dissector(reader, atom_subtree)
+            atom_subtree:set_text(info)
+        end
+    else
+        return
+    end
+end
+
+function read_ethernet(reader, version, subtree, field_name)
+    if reader.is_empty() then
+        return
+    end
+    local child_subtree = subtree:add(fields[field_name], reader.peek_all(0))
+    child_subtree:set_text("Ethernet packet")
+    ethernet_dissector:call(reader.read_all():tvb(), current_pkt, child_subtree)
+end
+
+function read_of_bsn_vport_q_in_q_t(reader, version, subtree, field_name)
+    if reader.is_empty() then
+        return
+    end
+    local child_subtree = subtree:add(fields[field_name], reader.peek_all(0))
+    local info = of_bsn_vport_q_in_q_dissectors[version](reader, child_subtree)
+    child_subtree:set_text(info)
+end
diff --git a/wireshark_gen/templates/openflow.lua b/wireshark_gen/templates/openflow.lua
new file mode 100644
index 0000000..fea6676
--- /dev/null
+++ b/wireshark_gen/templates/openflow.lua
@@ -0,0 +1,196 @@
+:: # Copyright 2013, Big Switch Networks, Inc.
+:: #
+:: # LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with
+:: # the following special exception:
+:: #
+:: # LOXI Exception
+:: #
+:: # As a special exception to the terms of the EPL, you may distribute libraries
+:: # generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided
+:: # that copyright and licensing notices generated by LoxiGen are not altered or removed
+:: # from the LoxiGen Libraries and the notice provided below is (i) included in
+:: # the LoxiGen Libraries, if distributed in source code form and (ii) included in any
+:: # documentation for the LoxiGen Libraries, if distributed in binary form.
+:: #
+:: # Notice: "Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler."
+:: #
+:: # You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain
+:: # a copy of the EPL at:
+:: #
+:: # http://www.eclipse.org/legal/epl-v10.html
+:: #
+:: # Unless required by applicable law or agreed to in writing, software
+:: # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+:: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+:: # EPL for the specific language governing permissions and limitations
+:: # under the EPL.
+::
+:: import loxi_globals
+:: ir = loxi_globals.ir
+:: include('_copyright.lua')
+
+-- Copy this file to your wireshark plugin directory:
+--   Linux / OS X: ~/.wireshark/plugins/
+--   Windows: C:\Documents and Settings\<username>\Application Data\Wireshark\plugins\
+-- You may need to create the directory.
+
+-- The latest version of this dissector is always available at:
+-- http://www.projectfloodlight.org/openflow.lua
+
+:: include('_ofreader.lua')
+
+p_of = Proto ("of", "OpenFlow")
+ethernet_dissector = Dissector.get("eth")
+
+current_pkt = nil
+
+local openflow_versions = {
+:: for version in loxi_globals.OFVersions.all_supported:
+    [${version.wire_version}] = "${version.version}",
+:: #endfor
+}
+
+:: for version, ofproto in ir.items():
+:: for enum in ofproto.enums:
+local enum_v${version.wire_version}_${enum.name} = {
+:: for (name, value) in enum.values:
+    [${value}] = "${name}",
+:: #endfor
+}
+
+:: #endfor
+
+:: #endfor
+
+
+fields = {}
+:: for field in fields:
+:: if field.type in ["uint8", "uint16", "uint32", "uint64"]:
+fields[${repr(field.fullname)}] = ProtoField.${field.type}("${field.fullname}", "${field.name}", base.${field.base}, ${field.enum_table})
+:: elif field.type in ["ipv4", "ipv6", "ether", "bytes", "stringz"]:
+fields[${repr(field.fullname)}] = ProtoField.${field.type}("${field.fullname}", "${field.name}")
+:: else:
+:: raise NotImplementedError("unknown Wireshark type " + field.type)
+:: #endif
+:: #endfor
+
+p_of.fields = {
+:: for field in fields:
+    fields[${repr(field.fullname)}],
+:: #endfor
+}
+
+-- Subclass maps for virtual classes
+:: for version, ofproto in ir.items():
+:: for ofclass in ofproto.classes:
+:: if ofclass.virtual:
+${ofclass.name}_v${version.wire_version}_dissectors = {}
+:: #endif
+:: #endfor
+:: #endfor
+
+--- Dissectors for each class
+:: for version, ofproto in ir.items():
+:: for ofclass in ofproto.classes:
+:: name = 'dissect_%s_v%d' % (ofclass.name, version.wire_version)
+:: include('_ofclass_dissector.lua', name=name, ofclass=ofclass, version=version)
+:: if ofclass.superclass:
+:: discriminator = ofclass.superclass.discriminator
+:: discriminator_value = ofclass.member_by_name(discriminator.name).value
+${ofclass.superclass.name}_v${version.wire_version}_dissectors[${discriminator_value}] = ${name}
+
+:: #endif
+:: #endfor
+:: #endfor
+
+local of_message_dissectors = {
+:: for version in ir:
+    [${version.wire_version}] = dissect_of_header_v${version.wire_version},
+:: #endfor
+}
+
+local of_port_desc_dissectors = {
+:: for version in ir:
+    [${version.wire_version}] = dissect_of_port_desc_v${version.wire_version},
+:: #endfor
+}
+
+local of_oxm_dissectors = {
+:: for version in ir:
+    [${version.wire_version}] = dissect_of_oxm_v${version.wire_version},
+:: #endfor
+}
+
+local of_bsn_vport_q_in_q_dissectors = {
+:: for version in ir:
+    [${version.wire_version}] = dissect_of_bsn_vport_q_in_q_v${version.wire_version},
+:: #endfor
+}
+
+:: include('_oftype_readers.lua')
+
+function dissect_of_message(buf, root)
+    local reader = OFReader.new(buf)
+    local subtree = root:add(p_of, buf(0))
+    local version_val = buf(0,1):uint()
+    local type_val = buf(1,1):uint()
+
+    local protocol = "OF ?"
+    if openflow_versions[version_val] then
+        protocol = "OF " .. openflow_versions[version_val]
+    else
+        return "Unknown protocol", "Dissection error"
+    end
+
+    local info = "unknown"
+    info = of_message_dissectors[version_val](reader, subtree)
+
+    return protocol, info
+end
+
+-- of dissector function
+function p_of.dissector (buf, pkt, root)
+    local offset = 0
+    current_pkt = pkt
+    repeat
+        if buf:len() - offset >= 4 then
+            local msg_len = buf(offset+2,2):uint()
+
+            if msg_len < 8 then
+                break
+            end
+
+            if offset + msg_len > buf:len() then
+                -- we don't have all the data we need yet
+                pkt.desegment_len = offset + msg_len - buf:len()
+                return
+            end
+
+            protocol, info = dissect_of_message(buf(offset, msg_len), root)
+
+            if offset == 0 then
+                pkt.cols.protocol:clear()
+                pkt.cols.info:clear()
+            else
+                pkt.cols.protocol:append(" + ")
+                pkt.cols.info:append(" + ")
+            end
+            pkt.cols.protocol:append(protocol)
+            pkt.cols.info:append(info)
+            offset = offset + msg_len
+        else
+            -- we don't have the full length field yet
+            pkt.desegment_len = DESEGMENT_ONE_MORE_SEGMENT
+            return
+        end
+    until offset >= buf:len()
+end
+
+-- Initialization routine
+function p_of.init()
+end
+
+-- register a chained dissector for OpenFlow port numbers
+local tcp_dissector_table = DissectorTable.get("tcp.port")
+tcp_dissector_table:add(6633, p_of)
+tcp_dissector_table:add(6653, p_of)
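
The p_of.dissector loop above splits a TCP stream into OpenFlow messages using only the common header: version (1 byte), type (1 byte), big-endian length (2 bytes), xid (4 bytes), with a minimum message length of 8 and TCP desegmentation when a message is incomplete. A small Python sketch of the same framing logic, offered only as a reference model (it is not part of the generated Lua):

import struct

OFP_HEADER_LEN = 8

def split_openflow_messages(buf):
    """Yield (version, msg_type, payload) for each complete message in buf.

    Mirrors the framing in p_of.dissector: the 16-bit length at offset 2
    covers the whole message, and anything shorter than 8 bytes is invalid.
    """
    offset = 0
    while len(buf) - offset >= 4:
        version, msg_type, msg_len = struct.unpack_from('!BBH', buf, offset)
        if msg_len < OFP_HEADER_LEN or offset + msg_len > len(buf):
            break  # malformed or incomplete; the Lua dissector asks TCP for more data here
        yield version, msg_type, buf[offset:offset + msg_len]
        offset += msg_len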