[ffc] 01/03: New upstream version 2017.1.0

Johannes Ring johannr-guest at moszumanska.debian.org
Wed May 10 12:15:34 UTC 2017


This is an automated email from the git hooks/post-receive script.

johannr-guest pushed a commit to branch experimental
in repository ffc.

commit 429b56998b829dcf12966b13b6ab87ad7ddee51c
Author: Johannes Ring <johannr at simula.no>
Date:   Wed May 10 12:45:18 2017 +0200

    New upstream version 2017.1.0
---
 ChangeLog                                          |  471 --------
 ChangeLog.rst                                      |  698 +++++++++++
 MANIFEST.in                                        |   15 +
 README.rst                                         |    2 +-
 .../Convection_3D_1.ufl                            |   20 +-
 .../Convection_3D_2.ufl                            |   20 +-
 .../__init__.py => bench/MassH1_3D_1.ufl           |   18 +-
 .../__init__.py => bench/MassH1_3D_2.ufl           |   18 +-
 demo/AdaptivePoisson.ufl => bench/Poisson_3D_1.ufl |   14 +-
 demo/AdaptivePoisson.ufl => bench/Poisson_3D_2.ufl |   14 +-
 .../WeightedPoisson_3D_1.ufl                       |   15 +-
 .../WeightedPoisson_3D_2.ufl                       |   15 +-
 bench/bench.py                                     |    2 +-
 bench/utils.py                                     |   18 +-
 demo/AdaptivePoisson.ufl                           |    2 +-
 demo/Biharmonic.ufl                                |   11 +-
 demo/FacetIntegrals.ufl                            |    3 +-
 demo/Mini.ufl                                      |    7 +-
 demo/{AdaptivePoisson.ufl => MixedCoefficient.ufl} |   25 +-
 demo/PoissonDG.ufl                                 |    6 +-
 demo/{AdaptivePoisson.ufl => PoissonQuad.ufl}      |   26 +-
 doc/sphinx/source/conf.py                          |   22 +-
 doc/sphinx/source/installation.rst                 |   56 +
 doc/sphinx/source/releases.rst                     |    1 +
 doc/sphinx/source/releases/v2017.1.0.rst           |   20 +
 ffc/__init__.py                                    |    7 +-
 ffc/__main__.py                                    |  244 +---
 ffc/analysis.py                                    |  337 ++++--
 ffc/backends/ufc/__init__.py                       |    6 +-
 ffc/backends/ufc/coordinate_mapping.py             |    4 +-
 ffc/backends/ufc/dofmap.py                         |    4 +-
 ffc/backends/ufc/finite_element.py                 |    4 +-
 ffc/backends/ufc/form.py                           |    4 +-
 ffc/backends/ufc/function.py                       |    4 +-
 ffc/backends/ufc/integrals.py                      |    4 +-
 ffc/backends/ufc/ufc.h                             |    8 +-
 ffc/codegeneration.py                              |   71 +-
 ffc/codesnippets.py                                |    1 +
 ffc/cpp.py                                         |   15 +-
 ffc/fiatinterface.py                               |    3 +-
 ffc/jitcompiler.py                                 |    6 +
 ffc/log.py                                         |    4 +-
 ffc/{__main__.py => main.py}                       |   63 +-
 ffc/optimization.py                                |   27 +-
 ffc/parameters.py                                  |   69 +-
 ffc/quadrature/__init__.py                         |    6 +-
 ffc/quadrature/optimisedquadraturetransformer.py   |    4 +-
 ffc/quadrature/parameters.py                       |   19 +-
 ffc/quadrature/quadraturegenerator.py              |    5 +
 ffc/quadrature/quadraturetransformer.py            |    4 +-
 ffc/quadrature/quadraturetransformerbase.py        |    4 +
 ffc/quadrature/symbolics.py                        |    4 +
 ffc/quadratureelement.py                           |    8 +-
 ffc/representation.py                              |   13 +-
 ffc/representationutils.py                         |   11 +-
 ffc/tensor/__init__.py                             |    7 +-
 ffc/tensor/monomialtransformation.py               |    1 +
 ffc/tensor/tensorgenerator.py                      |    8 +-
 .../tensor/tensoroptimization.py                   |   18 +-
 ffc/tensor/tensorreordering.py                     |    4 +-
 ffc/tsfc/__init__.py                               |    3 +
 ffc/tsfc/tsfcgenerator.py                          |   48 +
 ffc/tsfc/tsfcoptimization.py                       |   19 +
 ffc/tsfc/tsfcrepresentation.py                     |   50 +
 ffc/uflacs/analysis/balancing.py                   |    2 -
 ffc/uflacs/analysis/crsarray.py                    |    4 +-
 .../{graph_dependencies.py => dependencies.py}     |    4 +-
 ffc/uflacs/analysis/factorization.py               |  124 +-
 ffc/uflacs/analysis/graph_ssa.py                   |    2 +
 ffc/uflacs/backends/ffc/access.py                  |   89 +-
 ffc/uflacs/backends/ffc/common.py                  |    3 -
 ffc/uflacs/backends/ffc/definitions.py             |  127 +-
 ffc/uflacs/backends/ffc/symbols.py                 |  113 +-
 ffc/uflacs/build_uflacs_ir.py                      |  962 +++++++++++++++
 ffc/uflacs/elementtables.py                        |  641 ++++++++++
 ffc/uflacs/elementtables/table_utils.py            |  230 ----
 ffc/uflacs/elementtables/terminaltables.py         |  416 -------
 ffc/uflacs/generation/integralgenerator.py         |  510 --------
 ffc/uflacs/integralgenerator.py                    | 1219 ++++++++++++++++++++
 ffc/uflacs/language/cnodes.py                      |  654 ++++++++---
 ffc/uflacs/language/format_value.py                |   93 +-
 ffc/uflacs/language/ufl_to_cnodes.py               |   24 +-
 ffc/uflacs/representation/__init__.py              |   19 -
 ffc/uflacs/representation/build_uflacs_ir.py       |  437 -------
 ffc/uflacs/tools.py                                |   35 +-
 ffc/uflacs/uflacsgenerator.py                      |   49 +-
 ffc/uflacs/uflacsrepresentation.py                 |   40 +-
 setup.py                                           |    5 +-
 test/regression/README.rst                         |   64 +-
 test/regression/ffc-reference-data-id              |    2 +-
 test/regression/recdiff.py                         |    3 +-
 test/regression/scripts/upload                     |    2 +-
 test/regression/test.py                            |  254 +++-
 test/regression/ufctest.h                          |   80 +-
 test/uflacs/unit/test_cnodes.py                    |   34 +-
 test/uflacs/unit/test_factorization.py             |   31 +-
 test/uflacs/unit/test_format_code_structure.py     |    4 +-
 test/uflacs/unit/test_graph_algorithm.py           |    2 +-
 test/uflacs/unit/test_snippets.py                  |   54 +-
 test/uflacs/unit/test_ssa_manipulations.py         |    2 +-
 test/uflacs/unit/test_table_utils.py               |   67 +-
 test/uflacs/unit/test_ufc_backend.py               |    1 +
 test/unit/symbolics/test_float.py                  |    9 +-
 103 files changed, 5722 insertions(+), 3329 deletions(-)

diff --git a/ChangeLog b/ChangeLog
deleted file mode 100644
index ec9c6c1..0000000
--- a/ChangeLog
+++ /dev/null
@@ -1,471 +0,0 @@
-2016.2.0 [2016-11-30]
- - JIT compiler now compiles elements separately from forms to avoid duplicate work
- - Add parameter max_signature_length to optionally shorten signatures in the jit cache
- - Move uflacs module into ffc.uflacs
- - Remove installation of pkg-config and CMake files (UFC path and
-   compiler flags are available from ffc module)
- - Add dependency on dijitso and remove dependency on instant
- - Add experimental Bitbucket pipelines
- - Tidy the repo after UFC and UFLACS merge, and general spring cleanup. This
-   includes removal of instructions on how to merge two repos, commit hash
-   c8389032268041fe94682790cb773663bdf27286.
-2016.1.0 [2016-06-23]
- - Add function get_ufc_include to get path to ufc.h
- - Merge UFLACS into FFC
- - Generalize ufc interface to non-affine parameterized coordinates
- - Add ufc::coordinate_mapping class
- - Make ufc interface depend on C++11 features requiring gcc version >= 4.8
- - Add function ufc_signature() to the form compiler interface
- - Add function git_commit_hash()
-1.6.0 [2015-07-28]
- - Rename and modify a number of UFC interface functions. See docstrings in ufc.h for details.
- - Bump required SWIG version to 3.0.3
- - Disable dual basis (tabulate_coordinates and evaluate_dofs) for enriched
-   elements until a correct implementation is brought up
-1.5.0 [2015-01-12]
- - Remove FErari support
- - Add support for new integral type custom_integral
- - Support for new form compiler backend "uflacs", downloaded separately
-1.4.0 [2014-06-02]
- - Add support for integrals that know which coefficients they use
- - Many bug fixes for facet integrals over manifolds
- - Merge UFC into FFC; ChangeLog for UFC appended below
- - Various updates mirroring UFL changes
- - Experimental: New custom integral with user defined quadrature points
-1.3.0 [2014-01-07]
- - Fix bug with runtime check of SWIG version
- - Move DOLFIN wrappers here from DOLFIN
- - Add support for new UFL operators cell_avg and facet_avg
- - Add new reference data handling system; data is now kept in an external repository
- - Fix bugs with ignoring quadrature rule arguments
- - Use cpp optimization by default in jit compiler
-1.2.0 [2013-03-24]
- - New feature: Add basic support for point integrals on vertices
- - New feature: Add general support for m-dimensional cells in n-dimensional space (n >= m, n, m = 1, 2, 3)
-1.1.0 [2013-01-07]
- - Fix bug for Conditionals related to DG constant Coefficients. Bug #1082048.
- - Fix bug for Conditionals, precedence rules for And and Or. Bug #1075149.
- - Changed data structure from list to deque when pop(0) operation is needed, speeding up the split_expression operation considerably
- - Other minor fixes
-1.0.0 [2011-12-07]
- - Issue warning when form integration requires more than 100 points
-1.0-rc1 [2011-11-28]
- - Fix bug with coordinates on facet integrals (intervals). Bug #888682.
- - Add support for FacetArea, new geometric quantity in UFL.
- - Fix bug in optimised quadrature code, AlgebraOperators demo. Bug #890859.
- - Fix bug with undeclared variables in optimised quadrature code. Bug #883202.
-1.0-beta2 [2011-10-11]
- - Added support for Bessel functions, bessel_* (I,J,K,Y), in UFL.
- - Added support for error function, erf(), new math function in UFL.
- - Fix dof map 'need_entities' for Real spaces
- - Improve performance for basis function computation
-1.0-beta [2011-08-11]
- - Improve formatting of floats with up to one non-zero decimal place.
- - Fix bug involving zeros in products and sums. Bug #804160.
- - Fix bug for new conditions '&&', '||' and '!' in UFL. Bug #802560.
- - Fix bug involving VectorElement with dim=1. Bug #798578.
- - Fix bug with mixed element of symmetric tensor elements. Bug #745646.
- - Fix bug when using geometric coordinates with one quadrature point
-0.9.10 [2011-05-16]
- - Change license from GPL v3 or later to LGPL v3 or later
- - Add some schemes for low-order simplices
- - Request quadrature schemes by polynomial degree (no longer by number
-   of points in each direction)
- - Get quadrature schemes via ffc.quadrature_schemes
- - Improved lock handling in JIT compiler
- - Include common_cell in form signature
- - Add possibility to set swig binary and swig path
-0.9.9 [2011-02-23]
- - Add support for generating error control forms with option -e
- - Updates for UFC 2.0
- - Set minimal degree to 1 in automatic degree selection for expressions
- - Add command-line option -f no_ferari
- - Add support for plotting of elements
- - Add utility function compute_tensor_representation
-0.9.4 [2010-09-01]
- - Added memory cache in jit(), for preprocessed forms
- - Added support for Conditional and added demo/Conditional.ufl.
- - Added support for new geometric quantity Circumradius in UFL.
- - Added support for new geometric quantity CellVolume in UFL.
-0.9.3 [2010-07-01]
- - Make global_dimension for Real return an int instead of double, bug #592088
- - Add support for facet normal in 1D.
- - Expose -feliminate_zeros for quadrature optimisations to give user more
-   control
- - Remove return of form in compile_form
- - Remove object_names argument to compile_element
- - Rename ElementUnion -> EnrichedElement
- - Add support for tan() and inverse trigonometric functions
- - Added support for ElementUnion (i.e. span of combinations of elements)
- - Added support for Bubble elements
- - Added support for UFL.SpatialCoordinate.
-0.9.2 [2010-02-17]
- - Bug fix in removal of unused variables in Piola-mapped terms for tensor
-   representation
-0.9.1 [2010-02-15]
- - Add back support for FErari optimizations
- - Bug fixes in JIT compiler
-0.9.0 [2010-02-02]
- - Updates for FIAT 0.9.0
- - Updates for UFC 1.4.0 (now supporting the full interface)
- - Automatic selection of representation
- - Change quadrature_order --> quadrature_degree
- - Split compile() --> compile_form(), compile_element()
- - Major cleanup and reorganization of code (flatter directories)
- - Updates for changes in UFL: Argument, Coefficient, FormData
-0.7.1
- - Handle setting quadrature degree when it is set to None in UFL form
- - Added demo: HyperElasticity.ufl
-0.7.0
- - Move contents of TODO to: https://blueprints.launchpad.net/ffc
- - Support for restriction of finite elements to only consider facet dofs
- - Use quadrature_order from metadata when integrating terms using tensor representation
- - Use loop to reset the entries of the local element tensor
- - Added new symbolic classes for quadrature optimisation (speed up compilation)
- - Added demos: Biharmonic.ufl, div(grad(v)) term;
-                ReactionDiffusion.ufl, tuple notation;
-                MetaData.ufl, how to attach metadata to the measure;
-                ElementRestriction.ufl, restriction of elements to facets
- - Tabulate the coordinates of the integration points in the tabulate_tensor() function
- - Change command line option '-f split_implementation' -> '-f split'
- - Renaming of files and restructuring of the compiler directory
- - Added option -q rule (--quadrature-rule rule) to specify which rule to use
-   for integration of a given integral. (Can also be set through the metadata
-   key "quadrature_rule".) No rules have yet been implemented, so the default
-   is the FIAT rule.
- - Remove support for old style .form files/format
-0.6.2 [2009-04-07]
- - Experimental support for UFL, supporting both .form and .ufl
- - Moved configuration and construction of python extension module to ufc_module
-0.6.1 [2009-02-18]
- - Initial work on UFL transition
- - Minor bug fixes
- - The version of ufc and swig is included in the form signature
- - Better system configuration for JIT compiled forms
- - The JIT compiled Python extension module uses shared_ptr for all classes
-0.6.0 [2009-01-05]
- - Update DOLFIN output format (-l dolfin) for DOLFIN 0.9.0
- - Cross-platform fixes for test scripts
- - Minor bug fix for quadrature code generation (forms affected by this bug would not be able to compile)
- - Fix bug with output of *.py.
- - Permit dot product between rectangular matrices (Frobenius norm)
-0.5.1 [2008-10-20]
- - New operator skew()
- - Allow JIT compilation of elements and dof maps
- - Rewrite JIT compiler to rely on Instant for caching
- - Display flop count for evaluating the element tensor during compilation
- - Add arguments language and representation to options dictionary
- - Fix installation on Windows
- - Add option -f split_implementation for separate .h and .cpp files
-0.5.0 [2008-06-23]
- - Remove default restriction +/- for Constant
- - Make JIT optimization (-O0 / -O2) optional
- - Add in-memory cache to speed up JIT compiler for repeated assembly
- - Allow subdomain integrals without needing full range of integrals
- - Allow simple subdomain integral specification dx(0), dx(1), ds(0) etc
-0.4.5 [2008-04-30]
- - Optimizations in generated quadrature code
- - Change formatting of floats from %g to %e, fixes problem with too long integers
- - Bug fix for order of values in interpolate_vertex_values, now according to UFC
- - Speed up JIT compiler
- - Add index ranges to form printing
- - Throw runtime error in functions not generated
- - Update DOLFIN format for new location of include files
-0.4.4 [2008-02-18]
- - RT, BDM, BDFM and Nedelec now working in 2D and 3D
- - New element type QuadratureElement
- - Add support for 1D elements
- - Add experimental support for new Darcy-Stokes element
- - Use FIAT transformed spaces instead of mapping in FFC
- - Updates for UFC 1.1
- - Implement caching of forms/modules in ~/.ffc/cache for JIT compiler
- - Add script ffc-clean
- - New operators lhs() and rhs()
- - Bug fixes in simplify
- - Bug fixes for Nedelec and BDFM
- - Fix bug in mult()
- - Fix bug with restrictions on exterior facet integrals
- - Fix bug in grad() for vectors
- - Add divergence operator for matrices
-0.4.3 [2007-10-23]
- - Require FIAT to use UFC reference cells
- - Fix bug in form simplification
- - Rename abs --> modulus to avoid conflict with builtin abs
- - Fix bug in operators invert, abs, sqrt
- - Fix bug in integral tabulation
- - Add BDFM and Nedelec elements (nonworking)
- - Fix bug in JIT compiler
-0.4.2 [2007-08-31]
- - Change license from GPL v2 to GPL v3 or later
- - Add JIT (just-in-time) compiler
- - Fix bug for constants on interior facets
-0.4.1 [2007-06-22]
- - Fix bug in simplification of forms
- - Optimize removal of unused terms in code formatting
-0.4.0 [2007-06-20]
- - Move to UFC interface for code generation
- - Major rewrite, restructure, cleanup
- - Add support for Brezzi-Douglas-Marini (BDM) elements
- - Add support for Raviart-Thomas (RT) elements
- - Add support for Discontinuous Galerkin (DG) methods
- - Operators jump() and avg()
- - Add quadrature compilation mode (experimental)
- - Simplification of forms
- - Operators sqrt(), abs() and inverse
- - Improved Python interface
- - Add flag -f precision=n
- - Generate code for basis functions and derivatives
- - Use Set from set module for Python2.3 compatibility
-0.3.5 [2006-12-01]
- - Bug fixes
- - Move from Numeric to numpy
-0.3.4 [2006-10-27]
- - Updates for new DOLFIN mesh library
- - Add support for evaluation of functionals
- - Add operator outer() for outer product of vector-valued functions
- - Enable optimization of linear forms (in addition to bilinear forms)
- - Remove DOLFIN SWIG format
- - Fix bug in ffc -v/--version (thanks to Ola Skavhaug)
- - Consolidate DOLFIN and DOLFIN SWIG formats (patch from Johan Jansson)
- - Fix bug in optimized compilation (-O) for some forms ("too many values to unpack")
-0.3.3 [2006-09-05]
- - Fix bug in operator div()
- - Add operation count (number of multiplications) with -d0
- - Add hint for printing more informative error messages (flag -d1)
- - Modify implementation of vertexeval()
- - Add support for boundary integrals (Garth N. Wells)
-0.3.2 [2006-04-01]
- - Add support for FErari optimizations, new flag -O
-0.3.1 [2006-03-28]
- - Remove verbose output: silence means success
- - Generate empty boundary integral eval() to please Intel C++ compiler
- - New classes TestFunction and TrialFunction
-0.3.0 [2006-03-01]
- - Work on manual, document command-line and user-interfaces
- - Name change: u --> U
- - Add compilation of elements without form
- - Add generation of FiniteElementSpec in DOLFIN formats
- - Fix bugs in raw and XML formats
- - Fix bug in LaTeX format
- - Fix path and predefine tokens to enable import in .form file
- - Report number of entries in reference tensor during compilation
-0.2.5 [2005-12-28]
- - Add demo Stabilization.form
- - Further speedup computation of reference tensor (use ufunc Numeric.add)
-0.2.4 [2005-12-05]
- - Report time taken to compute reference tensor
- - Restructure computation of reference tensor to use less memory.
-   As a side effect, the speed has also been improved.
- - Update for DOLFIN name change node --> vertex
- - Update finite element interface for DOLFIN
- - Check for FIAT bug in discontinuous vector Lagrange elements
- - Fix signatures for vector-valued elements
-0.2.3 [2005-11-28]
- - New fast Numeric/BLAS based algorithm for computing reference tensor
- - Bug fix: reassign indices for complete subexpressions
- - Bug fix: operator Function * Integral
- - Check tensor notation for completeness
- - Bug fix: mixed elements with more than two function spaces
- - Don't declare unused coefficients (or gcc will complain)
-0.2.2 [2005-11-14]
- - Add command-line argument -v / --version
- - Add new operator mean() for projection onto piecewise constants
- - Add support for projections
- - Bug fix for higher order mixed elements: declaration of edge/face_ordering
- - Generate code for sub elements of mixed elements
- - Add new test form: TensorWeighteLaplacian
- - Add new test form: EnergyNorm
- - Fix bugs in mult() and vec() (skavhaug)
- - Reset correct entries of G for interior in BLAS mode
- - Only assign to entries of G that meet nonzero entries of A in BLAS mode
-0.2.1 [2005-10-11]
- - Only generate declarations that are needed according to format
- - Check for missing options and add missing default options
- - Simplify usage of FFC as Python module: from ffc import *
- - Fix bug in division with constants
- - Generate output for BLAS (with option -f blas)
- - Add new XML output format
- - Remove command-line option --license (collect in compiler options -f)
- - Modify demo Mass.form to use 3rd order Lagrange on tets
- - Fix bug in dofmap() for equal order mixed elements
- - Add compiler option -d debuglevel
- - Fix Python Numeric bug: vdot --> dot
-0.2.0 [2005-09-23]
- - Generate function vertexeval() for evaluation at vertices
- - Add support for arbitrary mixed elements
- - Add man page
- - Work on manual, chapters on form language, quickstart and installation
- - Handle exceptions gracefully in command-line interface
- - Use new template fenicsmanual.cls for manual
- - Add new operators grad, div, rot (curl), D, rank, trace, dot, cross
- - Factorize common reference tensors from terms with equal signatures
- - Collect small building blocks for form algebra in common module tokens.py
-0.1.9 [2005-07-05]
- - Complete support for general order Lagrange elements on triangles and tetrahedra
- - Compute reordering of dofs on tets correctly
- - Update manual with ordering of dofs
- - Break compilation into two phases: build() and write()
- - Add new output format ASE (Matt Knepley)
- - Improve python interface to FFC
- - Remove excessive logging at compilation
- - Fix bug in raw output format
-0.1.8 [2005-05-17]
- - Access data through map in DOLFIN format
- - Experimental support for computation of coordinate maps
- - Add first draft of manual
- - Experimental support for computation of dof maps
- - Allow specification of the number of components for vector Lagrange
- - Count the number of zeros dropped
- - Fix bug in handling command-line arguments
- - Use module sets instead of built-in set (fix for Python 2.3)
- - Handle constant indices correctly (bug reported by Garth N. Wells)
-0.1.7 [2005-05-02]
- - Write version number to output
- - Add command-line option for choosing license
- - Display usage if no input is given
- - Bug fix for finding correct prefix of file name
- - Automatically choose name of output file (if not supplied)
- - Use FIAT tabulation mode for vector-valued elements (speedup by a factor of 5)
- - Use FIAT tabulation mode for scalar elements (speedup by a factor of 1000)
- - Fix bug in demo elasticity.form (change order of u and v)
- - Make references to constants const in DOLFIN format
- - Don't generate code for unused entries of geometry tensor
- - Update formats to write numeric constants with full precision
-0.1.6 [2005-03-17]
- - Add support for mixing multiple different finite elements
- - Add support for division with constants
- - Fix index bug (reverse order of multi-indices)
-0.1.5 [2005-03-14]
- - Automatically choose the correct quadrature rule for precomputation
- - Add test program for verification of FIAT quadrature rules
- - Fix bug for derivative of sum
- - Improve common interface for debugging: add indentation
- - Add support for constants
- - Fix bug for sums of more than one term (make copies of references in lists)
- - Add '_' in naming of geometry tensor (needed for large dimensions)
- - Add example elasticity.form
- - Cleanup build_indices()
-0.1.4-1 [2005-02-07]
- - Fix version number and remove build directory from tarball
-0.1.4 [2005-02-04]
- - Fix bug for systems, seems to work now
- - Add common interface for debugging
- - Modify DOLFIN output to initialize functions
- - Create unique numbers for each function
- - Use namespaces for DOLFIN output instead of class names
- - Temporary implementation of dof mapping for vector-valued elements
- - Make DOLFIN output format put entries into PETSc block
- - Change name of coefficient data: c%d[%d] -> c[%d][%d]
- - Change ordering of basis functions (one component at a time)
- - Add example poissonsystem.form
- - Modifications for new version of FIAT (FIAT-L)
-   FIAT version 0.1 a factor 5 slower (no memoization)
-   FIAT version 0.1.1 a little faster, only a factor 2 slower
- - Add setup.py script
-0.1.3 [2004-12-06]
- - Fix bug in DOLFIN format (missing value when zero)
- - Add output of reference tensor to LaTeX format
- - Make raw output format print data with full precision
- - Add component diagram
- - Change order of declaration of basis functions
- - Add new output format raw
-0.1.2 [2004-11-17]
- - Add command-line interface ffc
- - Add support for functions (coefficients)
- - Add support for constants
- - Allow multiple forms (left- and right-hand side) in same file
- - Add test examples: poisson.form, mass.form, navierstokes.form
- - Wrap FIAT to create vector-valued finite element spaces
- - Check ranks of operands
- - Clean up algebra, add base class Element
- - Add some documentation (class diagram)
- - Add support for LaTeX output
-0.1.1-1 [2004-11-10]
- - Add missing file declaration.py
-0.1.1 [2004-11-10]
- - Make output variable names configurable
- - Clean up DOLFIN code generation
- - Post-process form to create reference, geometry, and element tensors
- - Experimental support for general tensor-valued elements
- - Clean up and improve index reassignment
- - Use string formatting for generation of output
- - Change index ordering to access row-wise
-0.1.0 [2004-10-22]
- - First iteration of the FEniCS Form Compiler
- - Change boost::shared_ptr --> std::shared_ptr
-
-ChangeLog for UFC
-=================
-
-UFC was merged into FFC on 2014-02-18. Below is the ChangeLog for
-UFC at the time of the merge. From this point onward, UFC version
-numbering restarts at the same version number as FFC and the rest
-of FEniCS.
-
-2.3.0 [2014-01-07]
- - Use std::vector<std::vector<std::size_t> > for topology data
- - Remove vertex coordinates from ufc::cell
- - Improve detection of compatible Python libraries
- - Add current swigversion to the JIT compiled extension module
- - Remove dofmap::max_local_dimension()
- - Remove cell argument from dofmap::local_dimension()
-2.2.0 [2013-03-24]
- - Add new class ufc::point_integral
- - Use CMake to configure JIT compilation of forms
- - Generate UseUFC.cmake during configuration
- - Remove init_mesh(), init_cell(), init_mesh_finalize()
- - Remove ufc::mesh and add a vector of num_mesh_entities to global_dimension() and tabulate_dofs().
-2.1.0 [2013-01-07]
- - Fix bug introduced by SWIG 2.0.5, which treated uint as Python long
- - Add optimization SWIG flags, fixing bug lp:987657
-2.0.5 [2011-12-07]
- - Improve configuration of libboost-math
-2.0.4 [2011-11-28]
- - Add boost_math_tr1 to library flags when JIT compiling an
-   extension module
-2.0.3 [2011-10-26]
- - CMake config improvements
-2.0.2 [2011-08-11]
- - Some tweaks of installation
-2.0.1 [2011-05-16]
- - Make SWIG version >= 2.0 a requirement
- - Add possibility to set swig binary and swig path
- - Add missing const for map_{from,to}_reference_cell
-2.0.0 [2011-02-23]
- - Add quadrature version of tabulate_tensor
- - Add finite_element::map_{from,to}_reference_cell
- - Add finite_element::{topological,geometric}_dimension
- - Add dofmap::topological_dimension
- - Rename num_foo_integrals --> num_foo_domains
- - Rename dof_map --> dofmap
- - Add finite_element::create
- - Add dofmap::create
-1.4.2 [2010-09-01]
- - Move to CMake build system
-1.4.1 [2010-07-01]
- - Make functions introduced in UFC 1.1 mandatory (now pure virtual)
- - Update templates to allow constructor arguments in form classes
-1.4.0 [2010-02-01]
- - Changed behavior of create_foo_integral (returning 0 when integral is 0)
- - Bug fixes in installation
-1.2.0 [2009-09-23]
- - Add new function ufc::dof_map::max_local_dimension()
- - Change ufc::dof_map::local_dimension() to ufc::dof_map::local_dimension(const ufc::cell c)
-1.1.2 [2009-04-07]
- - Added configuration and building of python extension module to ufc_utils.build_ufc_module
-1.1.1 [2009-02-20]
- - The extension module is now not built if the conditions for shared_ptr are not met
- - Added SCons build system
- - The swig generated extension module will be compiled with shared_ptr support if boost is found on the system and swig is of version 1.3.35 or higher
- - The swig generated extension module is named ufc.py and exposes all ufc base classes to Python
- - Added a swig generated extension module to ufc. UFC now depends on swig
- - Changed name of the python utility module from "ufc" to "ufc_utils"
-1.1.0 [2008-02-18]
- - Add new function ufc::finite_element::evaluate_dofs
- - Add new function ufc::finite_element::evaluate_basis_all
- - Add new function ufc::finite_element::evaluate_basis_derivatives_all
- - Add new function ufc::dof_map::geometric_dimension
- - Add new function ufc::dof_map::num_entity_dofs
- - Add new function ufc::dof_map::tabulate_entity_dofs
-1.0.0 [2007-06-17]
- - Release of UFC 1.0
diff --git a/ChangeLog.rst b/ChangeLog.rst
new file mode 100644
index 0000000..d42eec7
--- /dev/null
+++ b/ChangeLog.rst
@@ -0,0 +1,698 @@
+Changelog
+=========
+
+2017.1.0 (2017-05-09)
+---------------------
+
+- Let ffc -O parameter take an optional integer level like -O2, -O0
+- Implement blockwise optimizations in uflacs code generation
+- Expose uflacs optimization parameters through parameter system
+
+2016.2.0 (2016-11-30)
+---------------------
+
+- JIT compiler now compiles elements separately from forms to avoid duplicate work
+- Add parameter max_signature_length to optionally shorten signatures in the jit cache
+- Move uflacs module into ffc.uflacs
+- Remove installation of pkg-config and CMake files (UFC path and
+  compiler flags are available from ffc module)
+- Add dependency on dijitso and remove dependency on instant
+- Add experimental Bitbucket pipelines
+- Tidy the repo after UFC and UFLACS merge, and general spring cleanup. This
+  includes removal of instructions on how to merge two repos, commit hash
+  c8389032268041fe94682790cb773663bdf27286.
+
+2016.1.0 (2016-06-23)
+---------------------
+
+- Add function get_ufc_include to get path to ufc.h
+- Merge UFLACS into FFC
+- Generalize ufc interface to non-affine parameterized coordinates
+- Add ufc::coordinate_mapping class
+- Make ufc interface depend on C++11 features requiring gcc version >= 4.8
+- Add function ufc_signature() to the form compiler interface
+- Add function git_commit_hash()
+
+1.6.0 (2015-07-28)
+------------------
+
+- Rename and modify a number of UFC interface functions. See docstrings in ufc.h for details.
+- Bump required SWIG version to 3.0.3
+- Disable dual basis (tabulate_coordinates and evaluate_dofs) for enriched
+  elements until a correct implementation is brought up
+
+1.5.0 (2015-01-12)
+------------------
+
+- Remove FErari support
+- Add support for new integral type custom_integral
+- Support for new form compiler backend "uflacs", downloaded separately
+
+1.4.0 (2014-06-02)
+------------------
+
+- Add support for integrals that know which coefficients they use
+- Many bug fixes for facet integrals over manifolds
+- Merge UFC into FFC; ChangeLog for UFC appended below
+- Various updates mirroring UFL changes
+- Experimental: New custom integral with user defined quadrature points
+
+1.3.0 (2014-01-07)
+------------------
+
+- Fix bug with runtime check of SWIG version
+- Move DOLFIN wrappers here from DOLFIN
+- Add support for new UFL operators cell_avg and facet_avg
+- Add new reference data handling system; data is now kept in an external repository
+- Fix bugs with ignoring quadrature rule arguments
+- Use cpp optimization by default in jit compiler
+
+1.2.0 (2013-03-24)
+------------------
+
+- New feature: Add basic support for point integrals on vertices
+- New feature: Add general support for m-dimensional cells in n-dimensional space (n >= m, n, m = 1, 2, 3)
+
+1.1.0 (2013-01-07)
+------------------
+
+- Fix bug for Conditionals related to DG constant Coefficients. Bug #1082048.
+- Fix bug for Conditionals, precedence rules for And and Or. Bug #1075149.
+- Changed data structure from list to deque when pop(0) operation is needed, speeding up the split_expression operation considerably
+- Other minor fixes
+
+1.0.0 (2011-12-07)
+------------------
+
+- Issue warning when form integration requires more than 100 points
+
+1.0-rc1 (2011-11-28)
+--------------------
+
+- Fix bug with coordinates on facet integrals (intervals). Bug #888682.
+- Add support for FacetArea, new geometric quantity in UFL.
+- Fix bug in optimised quadrature code, AlgebraOperators demo. Bug #890859.
+- Fix bug with undeclared variables in optimised quadrature code. Bug #883202.
+
+1.0-beta2 (2011-10-11)
+----------------------
+
+- Added support for Bessel functions, bessel_* (I,J,K,Y), in UFL.
+- Added support for error function, erf(), new math function in UFL.
+- Fix dof map 'need_entities' for Real spaces
+- Improve performance for basis function computation
+
+1.0-beta (2011-08-11)
+---------------------
+
+- Improve formatting of floats with up to one non-zero decimal place.
+- Fix bug involving zeros in products and sums. Bug #804160.
+- Fix bug for new conditions '&&', '||' and '!' in UFL. Bug #802560.
+- Fix bug involving VectorElement with dim=1. Bug #798578.
+- Fix bug with mixed element of symmetric tensor elements. Bug #745646.
+- Fix bug when using geometric coordinates with one quadrature point
+
+0.9.10 (2011-05-16)
+-------------------
+
+- Change license from GPL v3 or later to LGPL v3 or later
+- Add some schemes for low-order simplices
+- Request quadrature schemes by polynomial degree (no longer by number
+  of points in each direction)
+- Get quadrature schemes via ffc.quadrature_schemes
+- Improved lock handling in JIT compiler
+- Include common_cell in form signature
+- Add possibility to set swig binary and swig path
+
+0.9.9 (2011-02-23)
+------------------
+
+- Add support for generating error control forms with option -e
+- Updates for UFC 2.0
+- Set minimal degree to 1 in automatic degree selection for expressions
+- Add command-line option -f no_ferari
+- Add support for plotting of elements
+- Add utility function compute_tensor_representation
+
+0.9.4 (2010-09-01)
+------------------
+
+- Added memory cache in jit(), for preprocessed forms
+- Added support for Conditional and added demo/Conditional.ufl.
+- Added support for new geometric quantity Circumradius in UFL.
+- Added support for new geometric quantity CellVolume in UFL.
+
+0.9.3 (2010-07-01)
+------------------
+
+- Make global_dimension for Real return an int instead of double, bug #592088
+- Add support for facet normal in 1D.
+- Expose -feliminate_zeros for quadrature optimisations to give user more
+  control
+- Remove return of form in compile_form
+- Remove object_names argument to compile_element
+- Rename ElementUnion -> EnrichedElement
+- Add support for tan() and inverse trigonometric functions
+- Added support for ElementUnion (i.e. span of combinations of elements)
+- Added support for Bubble elements
+- Added support for UFL.SpatialCoordinate.
+
+0.9.2 (2010-02-17)
+------------------
+
+- Bug fix in removal of unused variables in Piola-mapped terms for tensor
+  representation
+
+0.9.1 (2010-02-15)
+------------------
+
+- Add back support for FErari optimizations
+- Bug fixes in JIT compiler
+
+0.9.0 (2010-02-02)
+------------------
+
+- Updates for FIAT 0.9.0
+- Updates for UFC 1.4.0 (now supporting the full interface)
+- Automatic selection of representation
+- Change quadrature_order --> quadrature_degree
+- Split compile() --> compile_form(), compile_element()
+- Major cleanup and reorganization of code (flatter directories)
+- Updates for changes in UFL: Argument, Coefficient, FormData
+
+0.7.1
+-----
+
+- Handle setting quadrature degree when it is set to None in UFL form
+- Added demo: HyperElasticity.ufl
+
+0.7.0
+-----
+
+- Move contents of TODO to: https://blueprints.launchpad.net/ffc
+- Support for restriction of finite elements to only consider facet dofs
+- Use quadrature_order from metadata when integrating terms using tensor representation
+- Use loop to reset the entries of the local element tensor
+- Added new symbolic classes for quadrature optimisation (speed up compilation)
+- Added demos: Biharmonic.ufl, div(grad(v)) term;
+               ReactionDiffusion.ufl, tuple notation;
+               MetaData.ufl, how to attach metadata to the measure;
+               ElementRestriction.ufl, restriction of elements to facets
+- Tabulate the coordinates of the integration points in the tabulate_tensor() function
+- Change command line option '-f split_implementation' -> '-f split'
+- Renaming of files and restructuring of the compiler directory
+- Added option -q rule (--quadrature-rule rule) to specify which rule to use
+  for integration of a given integral. (Can also be set through the metadata
+  key "quadrature_rule".) No rules have yet been implemented, so the default
+  is the FIAT rule.
+- Remove support for old style .form files/format
+
+0.6.2 (2009-04-07)
+------------------
+
+- Experimental support for UFL, supporting both .form and .ufl
+- Moved configuration and construction of python extension module to ufc_module
+
+0.6.1 (2009-02-18)
+------------------
+
+- Initial work on UFL transition
+- Minor bug fixes
+- The version of ufc and swig is included in the form signature
+- Better system configuration for JIT compiled forms
+- The JIT compiled Python extension module uses shared_ptr for all classes
+
+0.6.0 (2009-01-05)
+------------------
+
+- Update DOLFIN output format (-l dolfin) for DOLFIN 0.9.0
+- Cross-platform fixes for test scripts
+- Minor bug fix for quadrature code generation (forms affected by this bug would not be able to compile)
+- Fix bug with output of ``*.py``.
+- Permit dot product between rectangular matrices (Frobenius norm)
+
+0.5.1 (2008-10-20)
+------------------
+
+- New operator skew()
+- Allow JIT compilation of elements and dof maps
+- Rewrite JIT compiler to rely on Instant for caching
+- Display flop count for evaluating the element tensor during compilation
+- Add arguments language and representation to options dictionary
+- Fix installation on Windows
+- Add option -f split_implementation for separate .h and .cpp files
+
+0.5.0 (2008-06-23)
+------------------
+
+- Remove default restriction +/- for Constant
+- Make JIT optimization (-O0 / -O2) optional
+- Add in-memory cache to speed up JIT compiler for repeated assembly
+- Allow subdomain integrals without needing full range of integrals
+- Allow simple subdomain integral specification dx(0), dx(1), ds(0) etc
+
+0.4.5 (2008-04-30)
+------------------
+
+- Optimizations in generated quadrature code
+- Change formatting of floats from %g to %e, fixes problem with too long integers
+- Bug fix for order of values in interpolate_vertex_values, now according to UFC
+- Speed up JIT compiler
+- Add index ranges to form printing
+- Throw runtime error in functions not generated
+- Update DOLFIN format for new location of include files
+
+0.4.4 (2008-02-18)
+------------------
+
+- RT, BDM, BDFM and Nedelec now working in 2D and 3D
+- New element type QuadratureElement
+- Add support for 1D elements
+- Add experimental support for new Darcy-Stokes element
+- Use FIAT transformed spaces instead of mapping in FFC
+- Updates for UFC 1.1
+- Implement caching of forms/modules in ~/.ffc/cache for JIT compiler
+- Add script ffc-clean
+- New operators lhs() and rhs()
+- Bug fixes in simplify
+- Bug fixes for Nedelec and BDFM
+- Fix bug in mult()
+- Fix bug with restrictions on exterior facet integrals
+- Fix bug in grad() for vectors
+- Add divergence operator for matrices
+
+0.4.3 (2007-10-23)
+------------------
+
+- Require FIAT to use UFC reference cells
+- Fix bug in form simplification
+- Rename abs --> modulus to avoid conflict with builtin abs
+- Fix bug in operators invert, abs, sqrt
+- Fix bug in integral tabulation
+- Add BDFM and Nedelec elements (nonworking)
+- Fix bug in JIT compiler
+
+0.4.2 (2007-08-31)
+------------------
+
+- Change license from GPL v2 to GPL v3 or later
+- Add JIT (just-in-time) compiler
+- Fix bug for constants on interior facets
+
+0.4.1 (2007-06-22)
+------------------
+
+- Fix bug in simplification of forms
+- Optimize removal of unused terms in code formatting
+
+0.4.0 (2007-06-20)
+------------------
+
+- Move to UFC interface for code generation
+- Major rewrite, restructure, cleanup
+- Add support for Brezzi-Douglas-Marini (BDM) elements
+- Add support for Raviart-Thomas (RT) elements
+- Add support for Discontinuous Galerkin (DG) methods
+- Operators jump() and avg()
+- Add quadrature compilation mode (experimental)
+- Simplification of forms
+- Operators sqrt(), abs() and inverse
+- Improved Python interface
+- Add flag -f precision=n
+- Generate code for basis functions and derivatives
+- Use Set from set module for Python2.3 compatibility
+
+0.3.5 (2006-12-01)
+------------------
+
+- Bug fixes
+- Move from Numeric to numpy
+
+0.3.4 (2006-10-27)
+------------------
+
+- Updates for new DOLFIN mesh library
+- Add support for evaluation of functionals
+- Add operator outer() for outer product of vector-valued functions
+- Enable optimization of linear forms (in addition to bilinear forms)
+- Remove DOLFIN SWIG format
+- Fix bug in ffc -v/--version (thanks to Ola Skavhaug)
+- Consolidate DOLFIN and DOLFIN SWIG formats (patch from Johan Jansson)
+- Fix bug in optimized compilation (-O) for some forms ("too many values to unpack")
+
+0.3.3 (2006-09-05)
+------------------
+
+- Fix bug in operator div()
+- Add operation count (number of multiplications) with -d0
+- Add hint for printing more informative error messages (flag -d1)
+- Modify implementation of vertexeval()
+- Add support for boundary integrals (Garth N. Wells)
+
+0.3.2 (2006-04-01)
+------------------
+
+- Add support for FErari optimizations, new flag -O
+
+0.3.1 (2006-03-28)
+------------------
+
+- Remove verbose output: silence means success
+- Generate empty boundary integral eval() to please Intel C++ compiler
+- New classes TestFunction and TrialFunction
+
+0.3.0 (2006-03-01)
+------------------
+
+- Work on manual, document command-line and user-interfaces
+- Name change: u --> U
+- Add compilation of elements without form
+- Add generation of FiniteElementSpec in DOLFIN formats
+- Fix bugs in raw and XML formats
+- Fix bug in LaTeX format
+- Fix path and predefine tokens to enable import in .form file
+- Report number of entries in reference tensor during compilation
+
+0.2.5 (2005-12-28)
+------------------
+
+- Add demo Stabilization.form
+- Further speedup computation of reference tensor (use ufunc Numeric.add)
+
+0.2.4 (2005-12-05)
+------------------
+
+- Report time taken to compute reference tensor
+- Restructure computation of reference tensor to use less memory.
+  As a side effect, the speed has also been improved.
+- Update for DOLFIN name change node --> vertex
+- Update finite element interface for DOLFIN
+- Check for FIAT bug in discontinuous vector Lagrange elements
+- Fix signatures for vector-valued elements
+
+0.2.3 (2005-11-28)
+------------------
+
+- New fast Numeric/BLAS based algorithm for computing reference tensor
+- Bug fix: reassign indices for complete subexpressions
+- Bug fix: operator Function * Integral
+- Check tensor notation for completeness
+- Bug fix: mixed elements with more than two function spaces
+- Don't declare unused coefficients (or gcc will complain)
+
+0.2.2 (2005-11-14)
+------------------
+
+- Add command-line argument -v / --version
+- Add new operator mean() for projection onto piecewise constants
+- Add support for projections
+- Bug fix for higher order mixed elements: declaration of edge/face_ordering
+- Generate code for sub elements of mixed elements
+- Add new test form: TensorWeighteLaplacian
+- Add new test form: EnergyNorm
+- Fix bugs in mult() and vec() (skavhaug)
+- Reset correct entries of G for interior in BLAS mode
+- Only assign to entries of G that meet nonzero entries of A in BLAS mode
+
+0.2.1 (2005-10-11)
+------------------
+
+- Only generate declarations that are needed according to format
+- Check for missing options and add missing default options
+- Simplify usage of FFC as Python module: from ffc import *
+- Fix bug in division with constants
+- Generate output for BLAS (with option -f blas)
+- Add new XML output format
+- Remove command-line option --license (collect in compiler options -f)
+- Modify demo Mass.form to use 3rd order Lagrange on tets
+- Fix bug in dofmap() for equal order mixed elements
+- Add compiler option -d debuglevel
+- Fix Python Numeric bug: vdot --> dot
+
+0.2.0 (2005-09-23)
+------------------
+
+- Generate function vertexeval() for evaluation at vertices
+- Add support for arbitrary mixed elements
+- Add man page
+- Work on manual, chapters on form language, quickstart and installation
+- Handle exceptions gracefully in command-line interface
+- Use new template fenicsmanual.cls for manual
+- Add new operators grad, div, rot (curl), D, rank, trace, dot, cross
+- Factorize common reference tensors from terms with equal signatures
+- Collect small building blocks for form algebra in common module tokens.py
+
+0.1.9 (2005-07-05)
+------------------
+
+- Complete support for general order Lagrange elements on triangles and tetrahedra
+- Compute reordering of dofs on tets correctly
+- Update manual with ordering of dofs
+- Break compilation into two phases: build() and write()
+- Add new output format ASE (Matt Knepley)
+- Improve python interface to FFC
+- Remove excessive logging at compilation
+- Fix bug in raw output format
+
+0.1.8 (2005-05-17)
+------------------
+
+- Access data through map in DOLFIN format
+- Experimental support for computation of coordinate maps
+- Add first draft of manual
+- Experimental support for computation of dof maps
+- Allow specification of the number of components for vector Lagrange
+- Count the number of zeros dropped
+- Fix bug in handling command-line arguments
+- Use module sets instead of built-in set (fix for Python 2.3)
+- Handle constant indices correctly (bug reported by Garth N. Wells)
+
+0.1.7 (2005-05-02)
+------------------
+
+- Write version number to output
+- Add command-line option for choosing license
+- Display usage if no input is given
+- Bug fix for finding correct prefix of file name
+- Automatically choose name of output file (if not supplied)
+- Use FIAT tabulation mode for vector-valued elements (speedup by a factor of 5)
+- Use FIAT tabulation mode for scalar elements (speedup by a factor of 1000)
+- Fix bug in demo elasticity.form (change order of u and v)
+- Make references to constants const in DOLFIN format
+- Don't generate code for unused entries of geometry tensor
+- Update formats to write numeric constants with full precision
+
+0.1.6 (2005-03-17)
+------------------
+
+- Add support for mixing multiple different finite elements
+- Add support for division with constants
+- Fix index bug (reverse order of multi-indices)
+
+0.1.5 (2005-03-14)
+------------------
+
+- Automatically choose the correct quadrature rule for precomputation
+- Add test program for verification of FIAT quadrature rules
+- Fix bug for derivative of sum
+- Improve common interface for debugging: add indentation
+- Add support for constants
+- Fix bug for sums of more than one term (make copies of references in lists)
+- Add '_' in naming of geometry tensor (needed for large dimensions)
+- Add example elasticity.form
+- Cleanup build_indices()
+
+0.1.4-1 (2005-02-07)
+--------------------
+
+- Fix version number and remove build directory from tarball
+
+0.1.4 (2005-02-04)
+------------------
+
+- Fix bug for systems, seems to work now
+- Add common interface for debugging
+- Modify DOLFIN output to initialize functions
+- Create unique numbers for each function
+- Use namespaces for DOLFIN output instead of class names
+- Temporary implementation of dof mapping for vector-valued elements
+- Make DOLFIN output format put entries into PETSc block
+- Change name of coefficient data: c%d[%d] -> c[%d][%d]
+- Change ordering of basis functions (one component at a time)
+- Add example poissonsystem.form
+- Modifications for new version of FIAT (FIAT-L)
+  FIAT version 0.1 a factor 5 slower (no memoization)
+  FIAT version 0.1.1 a little faster, only a factor 2 slower
+- Add setup.py script
+
+0.1.3 (2004-12-06)
+------------------
+
+- Fix bug in DOLFIN format (missing value when zero)
+- Add output of reference tensor to LaTeX format
+- Make raw output format print data with full precision
+- Add component diagram
+- Change order of declaration of basis functions
+- Add new output format raw
+
+0.1.2 (2004-11-17)
+------------------
+
+- Add command-line interface ffc
+- Add support for functions (coefficients)
+- Add support for constants
+- Allow multiple forms (left- and right-hand side) in same file
+- Add test examples: poisson.form, mass.form, navierstokes.form
+- Wrap FIAT to create vector-valued finite element spaces
+- Check ranks of operands
+- Clean up algebra, add base class Element
+- Add some documentation (class diagram)
+- Add support for LaTeX output
+
+0.1.1-1 (2004-11-10)
+--------------------
+
+- Add missing file declaration.py
+
+0.1.1 (2004-11-10)
+------------------
+
+- Make output variable names configurable
+- Clean up DOLFIN code generation
+- Post-process form to create reference, geometry, and element tensors
+- Experimental support for general tensor-valued elements
+- Clean up and improve index reassignment
+- Use string formatting for generation of output
+- Change index ordering to access row-wise
+
+0.1.0 (2004-10-22)
+------------------
+
+- First iteration of the FEniCS Form Compiler
+- Change boost::shared_ptr --> std::shared_ptr
+
+ChangeLog for UFC
+=================
+
+UFC was merged into FFC on 2014-02-18. Below is the ChangeLog for
+UFC at the time of the merge. From this point onward, UFC version
+numbering restarts at the same version number as FFC and the rest
+of FEniCS.
+
+2.3.0 (2014-01-07)
+------------------
+
+- Use std::vector<std::vector<std::size_t> > for topology data
+- Remove vertex coordinates from ufc::cell
+- Improve detection of compatible Python libraries
+- Add current swigversion to the JIT compiled extension module
+- Remove dofmap::max_local_dimension()
+- Remove cell argument from dofmap::local_dimension()
+
+2.2.0 (2013-03-24)
+------------------
+
+- Add new class ufc::point_integral
+- Use CMake to configure JIT compilation of forms
+- Generate UseUFC.cmake during configuration
+- Remove init_mesh(), init_cell(), init_mesh_finalize()
+- Remove ufc::mesh and add a vector of num_mesh_entities to global_dimension() and tabulate_dofs().
+
+2.1.0 (2013-01-07)
+------------------
+
+- Fix bug introduced by SWIG 2.0.5, which treated uint as Python long
+- Add optimization SWIG flags, fixing bug lp:987657
+
+2.0.5 (2011-12-07)
+------------------
+
+- Improve configuration of libboost-math
+
+2.0.4 (2011-11-28)
+------------------
+
+- Add boost_math_tr1 to library flags when JIT compiling an
+  extension module
+
+2.0.3 (2011-10-26)
+------------------
+
+- CMake config improvements
+
+2.0.2 (2011-08-11)
+------------------
+
+- Some tweaks of installation
+
+2.0.1 (2011-05-16)
+------------------
+
+- Make SWIG version >= 2.0 a requirement
+- Add possibility to set swig binary and swig path
+- Add missing const for map_{from,to}_reference_cell
+
+2.0.0 (2011-02-23)
+------------------
+
+- Add quadrature version of tabulate_tensor
+- Add finite_element::map_{from,to}_reference_cell
+- Add finite_element::{topological,geometric}_dimension
+- Add dofmap::topological_dimension
+- Rename num_foo_integrals --> num_foo_domains
+- Rename dof_map --> dofmap
+- Add finite_element::create
+- Add dofmap::create
+
+1.4.2 (2010-09-01)
+------------------
+
+- Move to CMake build system
+
+1.4.1 (2010-07-01)
+------------------
+
+- Make functions introduced in UFC 1.1 mandatory (now pure virtual)
+- Update templates to allow constructor arguments in form classes
+
+1.4.0 (2010-02-01)
+------------------
+
+- Changed behavior of create_foo_integral (returning 0 when integral is 0)
+- Bug fixes in installation
+
+1.2.0 (2009-09-23)
+------------------
+
+- Add new function ufc::dof_map::max_local_dimension()
+- Change ufc::dof_map::local_dimension() to ufc::dof_map::local_dimension(const ufc::cell c)
+
+1.1.2 (2009-04-07)
+------------------
+
+- Added configuration and building of python extension module to ufc_utils.build_ufc_module
+
+1.1.1 (2009-02-20)
+------------------
+
+- The extension module is now not built if the conditions for shared_ptr are not met
+- Added SCons build system
+- The swig generated extension module will be compiled with shared_ptr support if boost is found on the system and swig is of version 1.3.35 or higher
+- The swig generated extension module is named ufc.py and exposes all ufc base classes to Python
+- Added a swig generated extension module to ufc. UFC now depends on swig
+- Changed name of the python utility module from "ufc" to "ufc_utils"
+
+1.1.0 (2008-02-18)
+------------------
+
+- Add new function ufc::finite_element::evaluate_dofs
+- Add new function ufc::finite_element::evaluate_basis_all
+- Add new function ufc::finite_element::evaluate_basis_derivatives_all
+- Add new function ufc::dof_map::geometric_dimension
+- Add new function ufc::dof_map::num_entity_dofs
+- Add new function ufc::dof_map::tabulate_entity_dofs
+
+1.0.0 (2007-06-17)
+------------------
+
+- Release of UFC 1.0
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..090c88d
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,15 @@
+include AUTHORS
+include COPYING
+include COPYING.LESSER
+include ChangeLog
+include ChangeLog.uflacs
+include INSTALL
+include LICENSE
+include requirements.txt
+recursive-include bench *
+recursive-include demo *
+recursive-include doc *
+recursive-include ffc *.in
+recursive-include libs *
+recursive-include test *
+global-exclude __pycache__ *.pyc
diff --git a/README.rst b/README.rst
index e61d904..5c14b58 100644
--- a/README.rst
+++ b/README.rst
@@ -44,7 +44,7 @@ Code Coverage
 =============
 
 Code coverage reports can be viewed at
-https://coveralls.io/repos/bitbucket/fenics-project/ffc.
+https://coveralls.io/bitbucket/fenics-project/ffc.
 
 .. image:: https://coveralls.io/repos/bitbucket/fenics-project/ffc/badge.svg?branch=master
    :target: https://coveralls.io/bitbucket/fenics-project/ffc?branch=master
diff --git a/demo/AdaptivePoisson.ufl b/bench/Convection_3D_1.ufl
similarity index 67%
copy from demo/AdaptivePoisson.ufl
copy to bench/Convection_3D_1.ufl
index 46e1794..ebf70f5 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/bench/Convection_3D_1.ufl
@@ -1,4 +1,5 @@
-# Copyright (C) 2010 Marie E. Rognes
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Martin Alnæs
 #
 # This file is part of FFC.
 #
@@ -15,15 +16,14 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-element = FiniteElement("Lagrange", triangle, 1)
-element2 = FiniteElement("Lagrange", triangle, 3)
+# Form for convection matrix in a segregated Navier-Stokes solver
 
-u = TrialFunction(element)
-v = TestFunction(element)
+degree = 1
+U = FiniteElement("Lagrange", tetrahedron, degree)
+V = VectorElement("Lagrange", tetrahedron, degree)
 
-f = Coefficient(element2)
-g = Coefficient(element)
+v = TestFunction(U)
+u = TrialFunction(U)
+w = Coefficient(V)
 
-a = inner(grad(u), grad(v))*dx()
-L = f*v*dx() + g*v*ds()
-M = u*dx()
+a = dot(grad(u), w)*v*dx
diff --git a/demo/AdaptivePoisson.ufl b/bench/Convection_3D_2.ufl
similarity index 67%
copy from demo/AdaptivePoisson.ufl
copy to bench/Convection_3D_2.ufl
index 46e1794..8d00319 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/bench/Convection_3D_2.ufl
@@ -1,4 +1,5 @@
-# Copyright (C) 2010 Marie E. Rognes
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Martin Alnæs
 #
 # This file is part of FFC.
 #
@@ -15,15 +16,14 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-element = FiniteElement("Lagrange", triangle, 1)
-element2 = FiniteElement("Lagrange", triangle, 3)
+# Form for convection matrix in a segregated Navier-Stokes solver
 
-u = TrialFunction(element)
-v = TestFunction(element)
+degree = 2
+U = FiniteElement("Lagrange", tetrahedron, degree)
+V = VectorElement("Lagrange", tetrahedron, degree)
 
-f = Coefficient(element2)
-g = Coefficient(element)
+v = TestFunction(U)
+u = TrialFunction(U)
+w = Coefficient(V)
 
-a = inner(grad(u), grad(v))*dx()
-L = f*v*dx() + g*v*ds()
-M = u*dx()
+a = dot(grad(u), w)*v*dx
diff --git a/ffc/uflacs/elementtables/__init__.py b/bench/MassH1_3D_1.ufl
similarity index 55%
rename from ffc/uflacs/elementtables/__init__.py
rename to bench/MassH1_3D_1.ufl
index 63d581a..be198ec 100644
--- a/ffc/uflacs/elementtables/__init__.py
+++ b/bench/MassH1_3D_1.ufl
@@ -1,19 +1,23 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2016 Martin Sandve Alnæs
+# Copyright (C) 2010 Anders Logg
 #
-# This file is part of UFLACS.
+# This file is part of FFC.
 #
-# UFLACS is free software: you can redistribute it and/or modify
+# FFC is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
-# UFLACS is distributed in the hope that it will be useful,
+# FFC is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-"""Tools for precomputed tables of terminal values."""
+element = FiniteElement("Lagrange", tetrahedron, 1)
+
+v = TestFunction(element)
+u = TrialFunction(element)
+
+a = v*u*dx
diff --git a/ffc/uflacs/generation/__init__.py b/bench/MassH1_3D_2.ufl
similarity index 55%
rename from ffc/uflacs/generation/__init__.py
rename to bench/MassH1_3D_2.ufl
index bea41cf..286bdcb 100644
--- a/ffc/uflacs/generation/__init__.py
+++ b/bench/MassH1_3D_2.ufl
@@ -1,19 +1,23 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2016 Martin Sandve Alnæs
+# Copyright (C) 2010 Anders Logg
 #
-# This file is part of UFLACS.
+# This file is part of FFC.
 #
-# UFLACS is free software: you can redistribute it and/or modify
+# FFC is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
-# UFLACS is distributed in the hope that it will be useful,
+# FFC is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-"""Algorithms for the code generation phase of form compilation."""
+element = FiniteElement("Lagrange", tetrahedron, 2)
+
+v = TestFunction(element)
+u = TrialFunction(element)
+
+a = v*u*dx
diff --git a/demo/AdaptivePoisson.ufl b/bench/Poisson_3D_1.ufl
similarity index 73%
copy from demo/AdaptivePoisson.ufl
copy to bench/Poisson_3D_1.ufl
index 46e1794..afb3fd2 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/bench/Poisson_3D_1.ufl
@@ -1,4 +1,4 @@
-# Copyright (C) 2010 Marie E. Rognes
+# Copyright (C) 2004-2010 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -15,15 +15,9 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-element = FiniteElement("Lagrange", triangle, 1)
-element2 = FiniteElement("Lagrange", triangle, 3)
+element = FiniteElement("Lagrange", tetrahedron, 1)
 
-u = TrialFunction(element)
 v = TestFunction(element)
+u = TrialFunction(element)
 
-f = Coefficient(element2)
-g = Coefficient(element)
-
-a = inner(grad(u), grad(v))*dx()
-L = f*v*dx() + g*v*ds()
-M = u*dx()
+a = inner(grad(v), grad(u))*dx
diff --git a/demo/AdaptivePoisson.ufl b/bench/Poisson_3D_2.ufl
similarity index 73%
copy from demo/AdaptivePoisson.ufl
copy to bench/Poisson_3D_2.ufl
index 46e1794..516eeb9 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/bench/Poisson_3D_2.ufl
@@ -1,4 +1,4 @@
-# Copyright (C) 2010 Marie E. Rognes
+# Copyright (C) 2004-2010 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -15,15 +15,9 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-element = FiniteElement("Lagrange", triangle, 1)
-element2 = FiniteElement("Lagrange", triangle, 3)
+element = FiniteElement("Lagrange", tetrahedron, 2)
 
-u = TrialFunction(element)
 v = TestFunction(element)
+u = TrialFunction(element)
 
-f = Coefficient(element2)
-g = Coefficient(element)
-
-a = inner(grad(u), grad(v))*dx()
-L = f*v*dx() + g*v*ds()
-M = u*dx()
+a = inner(grad(v), grad(u))*dx
diff --git a/demo/AdaptivePoisson.ufl b/bench/WeightedPoisson_3D_1.ufl
similarity index 73%
copy from demo/AdaptivePoisson.ufl
copy to bench/WeightedPoisson_3D_1.ufl
index 46e1794..e70c805 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/bench/WeightedPoisson_3D_1.ufl
@@ -1,4 +1,4 @@
-# Copyright (C) 2010 Marie E. Rognes
+# Copyright (C) 2010 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -15,15 +15,10 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-element = FiniteElement("Lagrange", triangle, 1)
-element2 = FiniteElement("Lagrange", triangle, 3)
+element = FiniteElement("Lagrange", tetrahedron, 1)
 
-u = TrialFunction(element)
 v = TestFunction(element)
+u = TrialFunction(element)
+c = Coefficient(element)
 
-f = Coefficient(element2)
-g = Coefficient(element)
-
-a = inner(grad(u), grad(v))*dx()
-L = f*v*dx() + g*v*ds()
-M = u*dx()
+a = c*inner(grad(v), grad(u))*dx
diff --git a/demo/AdaptivePoisson.ufl b/bench/WeightedPoisson_3D_2.ufl
similarity index 73%
copy from demo/AdaptivePoisson.ufl
copy to bench/WeightedPoisson_3D_2.ufl
index 46e1794..44e1471 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/bench/WeightedPoisson_3D_2.ufl
@@ -1,4 +1,4 @@
-# Copyright (C) 2010 Marie E. Rognes
+# Copyright (C) 2010 Anders Logg
 #
 # This file is part of FFC.
 #
@@ -15,15 +15,10 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-element = FiniteElement("Lagrange", triangle, 1)
-element2 = FiniteElement("Lagrange", triangle, 3)
+element = FiniteElement("Lagrange", tetrahedron, 2)
 
-u = TrialFunction(element)
 v = TestFunction(element)
+u = TrialFunction(element)
+c = Coefficient(element)
 
-f = Coefficient(element2)
-g = Coefficient(element)
-
-a = inner(grad(u), grad(v))*dx()
-L = f*v*dx() + g*v*ds()
-M = u*dx()
+a = c*inner(grad(v), grad(u))*dx
diff --git a/bench/bench.py b/bench/bench.py
index d63d566..fff8fd0 100644
--- a/bench/bench.py
+++ b/bench/bench.py
@@ -29,7 +29,7 @@ from utils import print_table
 
 # Test options
 test_options = ["-r tensor",
-                "-r tensor -O",
+                #"-r tensor -O",
                 "-r quadrature",
                 "-r quadrature -O",
                 "-r uflacs"]
diff --git a/bench/utils.py b/bench/utils.py
index 4dcd1df..280d7bb 100644
--- a/bench/utils.py
+++ b/bench/utils.py
@@ -19,6 +19,8 @@
 # First added:  2010-05-11
 # Last changed: 2010-05-11
 
+from __future__ import print_function
+
 def print_table(values, title):
     "Print nicely formatted table."
 
@@ -47,13 +49,13 @@ def print_table(values, title):
     column_sizes = [max([len(table[i][j]) for i in range(m)]) for j in range(n)]
     row_size = sum(column_sizes) + 3*(len(column_sizes) - 1) + 2
 
-    print ""
+    print("")
     for i in range(m):
-        print " " + "-"*row_size
-        print "|",
+        print(" " + "-"*row_size)
+        print("|", end="")
         for j in range(n):
-            print table[i][j] + " "*(column_sizes[j] - len(table[i][j])),
-            print "|",
-        print ""
-    print " " + "-"*row_size
-    print ""
+            print(table[i][j] + " "*(column_sizes[j] - len(table[i][j])), end="")
+            print("|", end="")
+        print("")
+    print(" " + "-"*row_size)
+    print("")
diff --git a/demo/AdaptivePoisson.ufl b/demo/AdaptivePoisson.ufl
index 46e1794..a781336 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/demo/AdaptivePoisson.ufl
@@ -26,4 +26,4 @@ g = Coefficient(element)
 
 a = inner(grad(u), grad(v))*dx()
 L = f*v*dx() + g*v*ds()
-M = u*dx()
+M = v*dx()
diff --git a/demo/Biharmonic.ufl b/demo/Biharmonic.ufl
index 2713cb4..7cd7a13 100644
--- a/demo/Biharmonic.ufl
+++ b/demo/Biharmonic.ufl
@@ -30,11 +30,14 @@ element = FiniteElement("Lagrange", triangle, 2)
 # Trial and test functions
 u = TrialFunction(element)
 v = TestFunction(element)
-f = Coefficient(element)
 
-# Normal component, mesh size and right-hand side
+# Facet normal, mesh size and right-hand side
 n = FacetNormal(triangle)
-h = Constant(triangle)
+h = 2.0*Circumradius(triangle)
+f = Coefficient(element)
+
+# Compute average of mesh size
+h_avg = (h('+') + h('-'))/2.0
 
 # Parameters
 alpha = Constant(triangle)
@@ -43,7 +46,7 @@ alpha = Constant(triangle)
 a = inner(div(grad(u)), div(grad(v)))*dx \
   - inner(jump(grad(u), n), avg(div(grad(v))))*dS \
   - inner(avg(div(grad(u))), jump(grad(v), n))*dS \
-  + alpha('+')/h('+')*inner(jump(grad(u),n), jump(grad(v),n))*dS
+  + alpha/h_avg*inner(jump(grad(u),n), jump(grad(v),n))*dS
 
 # Linear form
 L = f*v*dx
diff --git a/demo/FacetIntegrals.ufl b/demo/FacetIntegrals.ufl
index b5f1c4f..fcf7a66 100644
--- a/demo/FacetIntegrals.ufl
+++ b/demo/FacetIntegrals.ufl
@@ -27,8 +27,7 @@ element = FiniteElement("Discontinuous Lagrange", triangle, 1)
 u = TrialFunction(element)
 v = TestFunction(element)
 
-h = Constant(triangle)
-n = VectorConstant(triangle)
+n = FacetNormal(triangle)
 
 a = u*v*ds \
   + u('+')*v('-')*dS \
diff --git a/demo/Mini.ufl b/demo/Mini.ufl
index 0e09bab..a5dc7d0 100644
--- a/demo/Mini.ufl
+++ b/demo/Mini.ufl
@@ -22,11 +22,12 @@
 
 # Compile this form with FFC: ffc Mini.ufl
 
-P1 = VectorElement("Lagrange", triangle, 1)
-B = VectorElement("Bubble", triangle, 3)
+P1 = FiniteElement("Lagrange", triangle, 1)
+B = FiniteElement("Bubble", triangle, 3)
+V = VectorElement(P1 + B)
 Q = FiniteElement("CG", triangle, 1)
 
-Mini =  (P1 + B)*Q
+Mini = V*Q
 (u, p) = TrialFunctions(Mini)
 (v, q) = TestFunctions(Mini)
 
diff --git a/demo/AdaptivePoisson.ufl b/demo/MixedCoefficient.ufl
similarity index 63%
copy from demo/AdaptivePoisson.ufl
copy to demo/MixedCoefficient.ufl
index 46e1794..848ca12 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/demo/MixedCoefficient.ufl
@@ -1,4 +1,6 @@
-# Copyright (C) 2010 Marie E. Rognes
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2016 Miklós Homolya
 #
 # This file is part of FFC.
 #
@@ -14,16 +16,19 @@
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
+#
+# Mixed coefficient
+#
+# Compile this form with FFC: ffc MixedCoefficient.ufl
+
+cell = triangle
 
-element = FiniteElement("Lagrange", triangle, 1)
-element2 = FiniteElement("Lagrange", triangle, 3)
+DG = VectorElement("DG", cell, 0)
+CG = FiniteElement("Lagrange", cell, 2)
+RT = FiniteElement("RT", cell, 3)
 
-u = TrialFunction(element)
-v = TestFunction(element)
+element = MixedElement(DG, CG, RT)
 
-f = Coefficient(element2)
-g = Coefficient(element)
+f, g, h = Coefficients(element)
 
-a = inner(grad(u), grad(v))*dx()
-L = f*v*dx() + g*v*ds()
-M = u*dx()
+forms = [dot(f('+'), h('-'))*dS + g*dx]
diff --git a/demo/PoissonDG.ufl b/demo/PoissonDG.ufl
index b25fc59..ff30038 100644
--- a/demo/PoissonDG.ufl
+++ b/demo/PoissonDG.ufl
@@ -30,11 +30,11 @@ element = FiniteElement("Discontinuous Lagrange", triangle, 1)
 # Trial and test functions
 u = TrialFunction(element)
 v = TestFunction(element)
-f = Coefficient(element)
 
-# Normal component, mesh size and right-hand side
+# Facet normal, mesh size and right-hand side
 n = FacetNormal(triangle)
-h = Constant(triangle)
+h = 2.0*Circumradius(triangle)
+f = Coefficient(element)
 
 # Compute average of mesh size
 h_avg = (h('+') + h('-'))/2.0
diff --git a/demo/AdaptivePoisson.ufl b/demo/PoissonQuad.ufl
similarity index 57%
copy from demo/AdaptivePoisson.ufl
copy to demo/PoissonQuad.ufl
index 46e1794..870abde 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/demo/PoissonQuad.ufl
@@ -1,4 +1,4 @@
-# Copyright (C) 2010 Marie E. Rognes
+# Copyright (C) 2016 Jan Blechta
 #
 # This file is part of FFC.
 #
@@ -14,16 +14,22 @@
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
+#
+# The bilinear form a(u, v) and linear form L(v) for
+# Poisson's equation using bilinear elements on bilinear mesh geometry.
+#
+# Compile this form with FFC: ffc PoissonQuad.ufl
 
-element = FiniteElement("Lagrange", triangle, 1)
-element2 = FiniteElement("Lagrange", triangle, 3)
+coords = VectorElement("P", triangle, 2)
+mesh = Mesh(coords)
+dx = dx(mesh)
 
-u = TrialFunction(element)
-v = TestFunction(element)
+element = FiniteElement("P", mesh.ufl_cell(), 2)
+space = FunctionSpace(mesh, element)
 
-f = Coefficient(element2)
-g = Coefficient(element)
+u = TrialFunction(space)
+v = TestFunction(space)
+f = Coefficient(space)
 
-a = inner(grad(u), grad(v))*dx()
-L = f*v*dx() + g*v*ds()
-M = u*dx()
+a = inner(grad(u), grad(v))*dx
+L = f*v*dx
diff --git a/doc/sphinx/source/conf.py b/doc/sphinx/source/conf.py
index 7f07fe2..81ce3f0 100644
--- a/doc/sphinx/source/conf.py
+++ b/doc/sphinx/source/conf.py
@@ -15,6 +15,8 @@
 import sys
 import os
 import shlex
+import pkg_resources
+import datetime
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -36,6 +38,7 @@ extensions = [
     'sphinx.ext.coverage',
     'sphinx.ext.mathjax',
     'sphinx.ext.viewcode',
+    'sphinx.ext.todo',
 ]
 
 # Add any paths that contain templates here, relative to this directory.
@@ -54,20 +57,11 @@ master_doc = 'index'
 
 # General information about the project.
 project = u'FEniCS Form Compiler (FFC)'
-copyright = u'2015, FEniCS Project'
+this_year = datetime.date.today().year
+copyright = u'%s, FEniCS Project' % this_year
 author = u'FEniCS Project'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-import ffc
-ffc_version = ffc.__version__
-
-# The short X.Y version.
-version = ffc_version
-# The full version, including alpha/beta/rc tags.
-release = ffc_version
+version = pkg_resources.get_distribution("ffc").version
+release = version
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -111,7 +105,7 @@ pygments_style = 'sphinx'
 #keep_warnings = False
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
-todo_include_todos = False
+todo_include_todos = True
 
 
 # -- Options for HTML output ----------------------------------------------
diff --git a/doc/sphinx/source/installation.rst b/doc/sphinx/source/installation.rst
index 7eb13ec..2f6e34c 100644
--- a/doc/sphinx/source/installation.rst
+++ b/doc/sphinx/source/installation.rst
@@ -32,6 +32,62 @@ FFC also depends on the following FEniCS Python packages:
 These packages will be automatically installed as part of the
 installation of FFC, if not already present on your system.
 
+TSFC requirements
+-----------------
+
+To use the experimental ``tsfc`` representation, additional
+dependencies are needed:
+
+* `TSFC <https://github.com/blechta/tsfc>`_ [1]_
+* `COFFEE <https://github.com/blechta/COFFEE>`_ [1]_
+* `FInAT <https://github.com/blechta/FInAT>`_ [1]_
+
+and in turn their additional dependencies:
+
+* singledispatch [2]_
+* networkx [2]_
+* PuLP [2]_, [4]_
+* GLPK [3]_, [4]_
+
+.. note:: TSFC requirements are not yet installed in FEniCS Docker
+    images by default, but they can be easily installed
+    on demand::
+
+        docker pull quay.io/fenicsproject/dev:latest
+        docker run -ti --rm quay.io/fenicsproject/dev:latest
+        sudo apt-get update && sudo apt-get -y install glpk-utils && \
+          pip2 install --prefix=${FENICS_PREFIX} --no-cache-dir \
+          git+https://github.com/blechta/tsfc.git@2017.1.0 \
+          git+https://github.com/blechta/COFFEE.git@2017.1.0 \
+          git+https://github.com/blechta/FInAT.git@2017.1.0 \
+          singledispatch networkx pulp && \
+          pip3 install --prefix=${FENICS_PREFIX} --no-cache-dir \
+          git+https://github.com/blechta/tsfc.git@2017.1.0 \
+          git+https://github.com/blechta/COFFEE.git@2017.1.0 \
+          git+https://github.com/blechta/FInAT.git@2017.1.0 \
+          singledispatch networkx pulp && \
+          sudo apt-get clean && \
+          sudo rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+    The first two commands (or a variation of them, or the
+    ``fenicsproject`` helper script) are to be run on the host,
+    while the last command, run inside the container, actually
+    installs all the TSFC requirements. For further reading,
+    see the `FEniCS Docker reference
+    <https://fenics-containers.readthedocs.io/>`_.
+
+.. [1] These are forks of the original packages tested to be
+   compatible with FFC and updated frequently from upstream.
+
+.. [2] Pip-installable.
+
+.. [3] Binary package; the ``glpsol`` executable is needed. Version
+    ``GLPSOL: GLPK LP/MIP Solver, v4.57`` from the Ubuntu 16.04
+    ``glpk-utils`` package is known to produce the same reference
+    results as our test system.
+
+.. [4] Needed for certain COFFEE optimizations.
+
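
Once these dependencies are in place, the experimental representation can be
requested per compilation. A hedged sketch, assuming the -r flag accepts the
"tsfc" value that ffc/analysis.py wires up later in this commit:

    from ffc import main

    # "Poisson.ufl" stands in for any UFL form file on disk.
    main(["-r", "tsfc", "Poisson.ufl"])
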
 Installation instructions
 =========================
 
diff --git a/doc/sphinx/source/releases.rst b/doc/sphinx/source/releases.rst
index e28cf80..a5aba5b 100644
--- a/doc/sphinx/source/releases.rst
+++ b/doc/sphinx/source/releases.rst
@@ -5,6 +5,7 @@ Release notes
    :maxdepth: 2
 
    releases/next
+   releases/v2017.1.0
    releases/v2016.2.0
    releases/v2016.1.0
    releases/v1.6.0
diff --git a/doc/sphinx/source/releases/v2017.1.0.rst b/doc/sphinx/source/releases/v2017.1.0.rst
new file mode 100644
index 0000000..5a410a8
--- /dev/null
+++ b/doc/sphinx/source/releases/v2017.1.0.rst
@@ -0,0 +1,20 @@
+===========================
+Changes in version 2017.1.0
+===========================
+
+FFC 2017.1.0 was released on 2017-05-09.
+
+Summary of changes
+==================
+
+- Add experimental ``tsfc`` representation; for installation see
+  `the reference manual
+  <https://fenics.readthedocs.io/projects/ffc/en/latest/installation.html>`_
+
+
+Detailed changes
+================
+
+- Let the ffc -O parameter take an optional integer level, e.g. -O2 or -O0 (see the sketch below)
+- Implement blockwise optimizations in uflacs code generation
+- Expose uflacs optimization parameters through parameter system
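
As a usage sketch for the optimization-level change (the -O2 spelling comes
from the release note itself; how the level maps to specific uflacs passes is
an assumption):

    from ffc import main

    # -O2 requests optimized uflacs code generation at level 2;
    # -O0 keeps the flag explicit while disabling optimization.
    main(["-r", "uflacs", "-O2", "Poisson.ufl"])
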
diff --git a/ffc/__init__.py b/ffc/__init__.py
index 667092a..959fff4 100644
--- a/ffc/__init__.py
+++ b/ffc/__init__.py
@@ -14,7 +14,10 @@ The interface consists of the following functions:
   ufc_signature      - Signature of UFC interface (SHA-1 hash of ufc.h)
 """
 
-__version__ = "2016.2.0"
+import pkg_resources
+
+__version__ = pkg_resources.get_distribution("ffc").version
+
 from ffc.git_commit_hash import git_commit_hash
 
 # Import compiler functions
@@ -58,5 +61,5 @@ except:
     supported_elements_for_plotting = []
 
 # Import main function, entry point to script
-from ffc.__main__ import main
+from ffc.main import main
 
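
With this change the version string is read from the installed package
metadata instead of being hard-coded, so it can no longer drift from setup.py.
A quick consistency check:

    import pkg_resources
    import ffc

    # Both values now come from the same source of truth.
    assert ffc.__version__ == pkg_resources.get_distribution("ffc").version
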
diff --git a/ffc/__main__.py b/ffc/__main__.py
index 1533e5d..becb357 100644
--- a/ffc/__main__.py
+++ b/ffc/__main__.py
@@ -1,11 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# This script is the command-line interface to FFC. It parses
-# command-line arguments and wraps the given form file code in a
-# Python module which is then executed.
-
-# Copyright (C) 2004-2016 Anders Logg
+# Copyright (C) 2017 Martin Sandve Alnæs
 #
 # This file is part of FFC.
 #
@@ -21,243 +17,11 @@
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# Modified by Johan Jansson, 2005.
-# Modified by Ola Skavhaug, 2006.
-# Modified by Dag Lindbo, 2008.
-# Modified by Kristian B. Oelgaard 2010.
-
-from __future__ import print_function
-
-# Python modules.
-import sys
-import getopt
-import cProfile
-import re
-import string
-import os
-from os import curdir
-from os import path
-from os import getcwd
-
-# UFL modules.
-from ufl.log import UFLException
-from ufl.algorithms import load_ufl_file
-import ufl
-
-# FFC modules.
-from ffc.log import push_level, pop_level
-from ffc.log import DEBUG, INFO, WARNING, ERROR
-from ffc.parameters import default_parameters
-from ffc import __version__ as FFC_VERSION, get_ufc_signature
-from ffc.backends.ufc import __version__ as UFC_VERSION
-from ffc.backends.ufc import get_include_path
-from ffc.compiler import compile_form, compile_element
-from ffc.formatting import write_code
-from ffc.errorcontrol import compile_with_error_control
-
-
-def print_error(msg):
-    "Print error message (cannot use log system at top level)."
-    print("\n".join(["*** FFC: " + line for line in msg.split("\n")]))
-
-
-def info_version():
-    "Print version number."
-    print("""\
-This is FFC, the FEniCS Form Compiler, version {0}.
-UFC backend version {1}, signature {2}.
-For further information, visit https://bitbucket.org/fenics-project/ffc/.
-""".format(FFC_VERSION, UFC_VERSION, get_ufc_signature()))
-
-
-def info_usage():
-    "Print usage information."
-    info_version()
-    print("""Usage: ffc [OPTION]... input.form
-
-For information about the FFC command-line interface, refer to
-the FFC man page which may invoked by 'man ffc' (if installed).
-""")
-
-
-def compile_ufl_data(ufd, prefix, parameters):
-    if parameters["error_control"]:
-        code_h, code_c = compile_with_error_control(ufd.forms,
-                                                    ufd.object_names,
-                                                    ufd.reserved_objects,
-                                                    prefix,
-                                                    parameters)
-    elif len(ufd.forms) > 0:
-        code_h, code_c = compile_form(ufd.forms, ufd.object_names,
-                                      prefix=prefix,
-                                      parameters=parameters)
-    else:
-        code_h, code_c = compile_element(ufd.elements, prefix=prefix,
-                                         parameters=parameters)
-    return code_h, code_c
-
-
-def main(args=None):
-    """This is the commandline tool for the python module ffc."""
-    if args is None:
-        args = sys.argv[1:]
-
-    # Get command-line arguments
-    try:
-        opts, args = getopt.getopt(args, "hIVSdvsl:r:f:Oo:q:ep",
-                                   ["help", "includes", "version", "signature", "debug", "verbose", "silent",
-                                    "language=", "representation=", "optimize",
-                                    "output-directory=", "quadrature-rule=", "error-control",
-                                    "profile"])
-    except getopt.GetoptError:
-        info_usage()
-        print_error("Illegal command-line arguments.")
-        return 1
-
-    # Check for --help
-    if ("-h", "") in opts or ("--help", "") in opts:
-        info_usage()
-        return 0
-
-    # Check for --includes
-    if ("-I", "") in opts or ("--includes", "") in opts:
-        print(get_include_path())
-        return 0
-
-    # Check for --version
-    if ("-V", "") in opts or ("--version", "") in opts:
-        info_version()
-        return 0
-
-    # Check for --signature
-    if ("-S", "") in opts or ("--signature", "") in opts:
-        print(get_ufc_signature())
-        return 0
-
-    # Check that we get at least one file
-    if len(args) == 0:
-        print_error("Missing file.")
-        return 1
-
-    # Get parameters
-    parameters = default_parameters()
-
-    # Choose WARNING as default for script
-    parameters["log_level"] = WARNING
-
-    # Set default value (not part of in parameters[])
-    enable_profile = False
-
-    # Parse command-line parameters
-    for opt, arg in opts:
-        if opt in ("-v", "--verbose"):
-            parameters["log_level"] = INFO
-        elif opt in ("-d", "--debug"):
-            parameters["log_level"] = DEBUG
-        elif opt in ("-s", "--silent"):
-            parameters["log_level"] = ERROR
-        elif opt in ("-l", "--language"):
-            parameters["format"] = arg
-        elif opt in ("-r", "--representation"):
-            parameters["representation"] = arg
-        elif opt in ("-q", "--quadrature-rule"):
-            parameters["quadrature_rule"] = arg
-        elif opt == "-f":
-            if len(arg.split("=")) == 2:
-                (key, value) = arg.split("=")
-                if key not in parameters:
-                    info_usage()
-                    return 1
-                default = parameters[key]
-                if isinstance(default, int):
-                    value = int(value)
-                elif isinstance(default, float):
-                    value = float(value)
-                parameters[key] = value
-            elif len(arg.split("==")) == 1:
-                key = arg.split("=")[0]
-                parameters[arg] = True
-            else:
-                info_usage()
-                return 1
-        elif opt in ("-O", "--optimize"):
-            parameters["optimize"] = True
-        elif opt in ("-o", "--output-directory"):
-            parameters["output_dir"] = arg
-        elif opt in ("-e", "--error-control"):
-            parameters["error_control"] = True
-        elif opt in ("-p", "--profile"):
-            enable_profile = True
-
-    # Set log_level
-    push_level(parameters["log_level"])
-
-    # Set UFL precision
-    ufl.constantvalue.precision = int(parameters["precision"])
-
-    # Print a versioning message if verbose output was requested
-    if parameters["log_level"] <= INFO:
-        info_version()
-
-    # Call parser and compiler for each file
-    resultcode = 0
-    for filename in args:
-
-        # Get filename prefix and suffix
-        prefix, suffix = os.path.splitext(os.path.basename(filename))
-        suffix = suffix.replace(os.path.extsep, "")
-
-        # Check file suffix
-        if suffix != "ufl":
-            print_error("Expecting a UFL form file (.ufl).")
-            resultcode = 1
-            break
-
-        # Remove weird characters (file system allows more than the C
-        # preprocessor)
-        prefix = re.subn("[^{}]".format(string.ascii_letters + string.digits + "_"), "!", prefix)[0]
-        prefix = re.subn("!+", "_", prefix)[0]
-
-        # Turn on profiling
-        if enable_profile:
-            pr = cProfile.Profile()
-            pr.enable()
-
-        # Load UFL file
-        ufd = load_ufl_file(filename)
-
-        # Previously wrapped in try-except, disabled to actually get information we need
-        #try:
-
-        # Generate code
-        code_h, code_c = compile_ufl_data(ufd, prefix, parameters)
-
-        # Write to file
-        write_code(code_h, code_c, prefix, parameters)
-
-        #except Exception as exception:
-        #    # Catch exceptions only when not in debug mode
-        #    if parameters["log_level"] <= DEBUG:
-        #        raise
-        #    else:
-        #        print("")
-        #        print_error(str(exception))
-        #        print_error("To get more information about this error, rerun FFC with --debug.")
-        #        return 1
-
-        # Turn off profiling and write status to file
-        if enable_profile:
-            pr.disable()
-            pfn = "ffc_{0}.profile".format(prefix)
-            pr.dump_stats(pfn)
-            print("Wrote profiling info to file {0}".format(pfn))
-
-    # Reset logging level
-    pop_level()
 
-    return resultcode
+"""This is triggered by running 'python -m ffc'."""
 
+from ffc.main import main
 
 if __name__ == "__main__":
+    import sys
     sys.exit(main())
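
Both entry points now reach the same implementation; a minimal sketch of the
Python-level equivalent, assuming ffc/main.py keeps the argument-list
signature of the removed code:

    # Shell: python -m ffc -r uflacs Poisson.ufl
    from ffc.main import main

    raise SystemExit(main(["-r", "uflacs", "Poisson.ufl"]))
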
diff --git a/ffc/analysis.py b/ffc/analysis.py
index 725897f..d6f35e2 100644
--- a/ffc/analysis.py
+++ b/ffc/analysis.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 
-# Copyright (C) 2007-2016 Anders Logg and Kristian B. Oelgaard
+# Copyright (C) 2007-2016 Anders Logg, Martin Alnaes, Kristian B. Oelgaard,
+# and others
 #
 # This file is part of FFC.
 #
@@ -16,9 +17,8 @@
 #
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
-#
-# Modified by Marie E. Rognes, 2010
-# Modified by Martin Sandve Alnæs, 2013-2014
+
+from __future__ import absolute_import
 
 """
 Compiler stage 1: Analysis
@@ -29,22 +29,27 @@ forms, including automatic selection of elements, degrees and
 form representation type.
 """
 
+import numpy
+from six import string_types
+
 import os
 import copy
 from itertools import chain
 
 # UFL modules
-from ufl.finiteelement import MixedElement, EnrichedElement
-from ufl.algorithms import estimate_total_polynomial_degree
+from ufl.classes import Form, CellVolume, FacetArea
+from ufl.integral import Integral
+from ufl.finiteelement import MixedElement, EnrichedElement, VectorElement
 from ufl.algorithms import sort_elements
 from ufl.algorithms import compute_form_data
 from ufl.algorithms.analysis import extract_sub_elements
+from ufl import custom_integral_types
 
 # FFC modules
-from ffc.log import log, info, begin, end, warning, debug, error, warning_blue
-from ffc.quadratureelement import default_quadrature_degree
+from ffc.log import info, begin, end, warning, debug, error, ffc_assert, warning_blue
 from ffc.utils import all_equal
 from ffc.tensor import estimate_cost
+from ffc.cpp import default_precision
 
 
 def analyze_forms(forms, parameters):
@@ -148,9 +153,15 @@ def _analyze_form(form, parameters):
     forced_r = os.environ.get("FFC_FORCE_REPRESENTATION")
     if forced_r:
         warning("representation:    forced by $FFC_FORCE_REPRESENTATION to '%s'" % forced_r)
+        r = forced_r
+        r = "legacy" if r in ["quadrature", "tensor"] else r
+    else:
+        # Check representation parameters to figure out how to preprocess
+        r = _extract_representation_family(form, parameters)
+    debug("Preprocessing form using '%s' representation family." % r)
 
     # Compute form metadata
-    if parameters["representation"] == "uflacs" or forced_r == "uflacs":
+    if r == "uflacs":
         # Temporary workaround to let uflacs have a different preprocessing pipeline
         # than the legacy representations quadrature and tensor. This approach imposes
         # a limitation that e.g. uflacs and tensor representation cannot be mixed in the same form.
@@ -162,18 +173,158 @@ def _analyze_form(form, parameters):
                                       preserve_geometry_types=(Jacobian,),
                                       do_apply_restrictions=True,
                                       )
-    else:
+    elif r == "tsfc":
+        try:
+            # TSFC provides compute_form_data wrapper using correct kwargs
+            from tsfc.ufl_utils import compute_form_data as tsfc_compute_form_data
+        except ImportError:
+            error("Failed to import tsfc.ufl_utils.compute_form_data when asked "
+                  "for tsfc representation.")
+        form_data = tsfc_compute_form_data(form)
+    elif r == "legacy":
+        # quadrature or tensor representation
         form_data = compute_form_data(form)
+    else:
+        error("Unexpected representation family '%s' for form preprocessing." % r)
 
     info("")
     info(str(form_data))
 
     # Attach integral meta data
-    _attach_integral_metadata(form_data, parameters)
+    _attach_integral_metadata(form_data, r, parameters)
+    _validate_representation_choice(form_data, r)
 
     return form_data
 
 
+def _extract_representation_family(form, parameters):
+    """Return 'uflacs', 'tsfc' or 'legacy', or raise error. This
+    takes care of (a) compatibility between representations due to
+    differences in preprocessing, (b) choosing uflacs for
+    higher-order geometries.
+
+    NOTE: Final representation is picked later by
+    ``_determine_representation``.
+    """
+    # Fetch all representation choice requests from metadata
+    representations = set(integral.metadata().get("representation", "auto")
+                          for integral in form.integrals())
+
+    # If auto is present, add parameters value (which may still be auto)
+    # and then remove auto so there's no auto in the set
+    if "auto" in representations:
+        representations.add(parameters["representation"])
+        representations.remove("auto")
+
+    # Translate quadrature/tensor to legacy
+    for r in list(representations):
+        if r in ['quadrature', 'tensor']:
+            representations.remove(r)
+            representations.add('legacy')
+
+    # Sanity check
+    ffc_assert(len(representations.intersection((
+        'quadrature', 'tensor', 'auto', None))) == 0,
+        "Unexpected representation family candidates '%s'." % representations)
+
+    # Find the representations compatible with this form's integrals
+    compatible = _find_compatible_representations(form.integrals(), [])
+    # Translate quadrature/tensor to legacy
+    for r in list(compatible):
+        if r in ['quadrature', 'tensor']:
+            compatible.remove(r)
+            compatible.add('legacy')
+
+    if len(representations) == 1:
+        r = representations.pop()
+        if r not in compatible:
+            error("Representation family %s is not compatible with this form (try one of %s)" % (r, sorted(compatible)))
+        return r
+    elif len(representations) == 0:
+        if len(compatible) == 1:
+            # If only one compatible, use it
+            return compatible.pop()
+        else:
+            # Default to uflacs
+            # NOTE: Need to pick the same default as in _auto_select_representation
+            return "uflacs"
+    else:
+        # Don't tolerate user requests for mixing old and new representation
+        # families in same form due to restrictions in preprocessing
+        assert len(representations) > 1
+        error("Cannot mix legacy (quadrature, tensor), uflacs, or tsfc "
+              "representation in single form.")
+
+
+def _validate_representation_choice(form_data,
+                                    preprocessing_representation_family):
+    """Check that effective representations
+
+    * do not mix legacy, uflacs and tsfc,
+    * implement higher-order geometry,
+    * match employed preprocessing strategy.
+
+    This function is a final check that everything is compatible,
+    given the mess in this file. Better safe than sorry...
+    """
+    # Fetch all representations from integral metadata
+    # (this should by now be populated with parameters or defaults instead of auto)
+    representations = set()
+    for ida in form_data.integral_data:
+        representations.add(ida.metadata["representation"])
+        for integral in ida.integrals:
+            representations.add(integral.metadata()["representation"])
+
+    # Translate quadrature/tensor to legacy
+    for r in list(representations):
+        if r in ['quadrature', 'tensor']:
+            representations.remove(r)
+            representations.add('legacy')
+
+    # No integrals
+    if len(representations) == 0:
+        return
+
+    # Require unique family; allow legacy only with affine meshes
+    if len(representations) != 1:
+        error("Failed to extract unique representation family. "
+              "Got '%s'." % representations)
+
+    if _has_higher_order_geometry(form_data.preprocessed_form):
+        ffc_assert('legacy' not in representations,
+            "Did not expect legacy representation for higher-order geometry.")
+
+    # Check preprocessing strategy
+    ffc_assert(preprocessing_representation_family in representations,
+        "Form has been preprocessed using '%s' representaion family, "
+        "while '%s' representations have been set for integrals."
+        % (preprocessing_representation_family, representations))
+
+
+def _has_custom_integrals(o):
+    if isinstance(o, Integral):
+        return o.integral_type() in custom_integral_types
+    elif isinstance(o, Form):
+        return any(_has_custom_integrals(itg) for itg in o.integrals())
+    elif isinstance(o, (list, tuple)):
+        return any(_has_custom_integrals(itg) for itg in o)
+    else:
+        raise NotImplementedError
+
+
+def _has_higher_order_geometry(o):
+    if isinstance(o, Integral):
+        P1 = VectorElement("P", o.ufl_domain().ufl_cell(), 1)
+        return o.ufl_domain().ufl_coordinate_element() != P1
+    elif isinstance(o, Form):
+        P1 = VectorElement("P", o.ufl_cell(), 1)
+        return any(d.ufl_coordinate_element() != P1 for d in o.ufl_domains())
+    elif isinstance(o, (list, tuple)):
+        return any(_has_higher_order_geometry(itg) for itg in o)
+    else:
+        raise NotImplementedError
+
+
 def _extract_common_quadrature_degree(integral_metadatas):
     # Check that quadrature degree is the same
     quadrature_degrees = [md["quadrature_degree"] for md in integral_metadatas]
@@ -182,6 +333,7 @@ def _extract_common_quadrature_degree(integral_metadatas):
             error("Invalid non-integer quadrature degree %s" % (str(d),))
     qd = max(quadrature_degrees)
     if not all_equal(quadrature_degrees):
+        # FIXME: Shouldn't we raise here?
         # TODO: This may be loosened up without too much effort,
         # if the form compiler handles mixed integration degree,
         # something that most of the pipeline seems to be ready for.
@@ -195,7 +347,7 @@ def _autoselect_quadrature_degree(integral_metadata, integral, form_data):
     pd = integral_metadata["estimated_polynomial_degree"]
 
     # Special case: handling -1 as "auto" for quadrature_degree
-    if qd == -1:
+    if qd in [-1, None]:
         qd = "auto"
 
     # TODO: Add other options here
@@ -233,6 +385,7 @@ def _extract_common_quadrature_rule(integral_metadatas):
         qr = quadrature_rules[0]
     else:
         qr = "canonical"
+        # FIXME: Shouldn't we raise here?
         info("Quadrature rule must be equal within each sub domain, using %s rule." % qr)
     return qr
 
@@ -240,7 +393,7 @@ def _extract_common_quadrature_rule(integral_metadatas):
 def _autoselect_quadrature_rule(integral_metadata, integral, form_data):
     # Automatic selection of quadrature rule
     qr = integral_metadata["quadrature_rule"]
-    if qr == "auto":
+    if qr in ["auto", None]:
         # Just use default for now.
         qr = "default"
         info("quadrature_rule:   auto --> %s" % qr)
@@ -253,63 +406,97 @@ def _autoselect_quadrature_rule(integral_metadata, integral, form_data):
     return qr
 
 
-def _determine_representation(integral_metadatas, ida, form_data, parameters):
+def _determine_representation(integral_metadatas, ida, form_data, form_r_family, parameters):
     "Determine one unique representation considering all integrals together."
 
-    # Hack because uflacs and quadrature/tensor cannot coincide in same form because of compute_form_data differences.
-    r = parameters["representation"]
-    if r == "uflacs":
-        warning("representation:    ignoring metadata and using '%s' set by parameters" % r)
-        return r
-
-    # Hack to override representation with environment variable
-    forced_r = os.environ.get("FFC_FORCE_REPRESENTATION")
-    if forced_r:
-        r = forced_r
-        warning("representation:    forced by $FFC_FORCE_REPRESENTATION to '%s'" % r)
-        return r
-
-    # Check that representations are compatible
+    # Extract unique representation among these single-domain integrals
     # (Generating code with different representations within a
     # single tabulate_tensor is considered not worth the effort)
-    representations = set()
-    for md in integral_metadatas:
-        if md["representation"] != "auto":
-            representations.add(md["representation"])
+    representations  = set(md["representation"] for md in integral_metadatas
+                           if md["representation"] != "auto")
+    optimize_values  = set(md["optimize"] for md in integral_metadatas)
+    precision_values = set(md["precision"] for md in integral_metadatas)
+
     if len(representations) > 1:
-        error("Integral representation must be equal within each sub domain or 'auto', got %s." % (str(list(set(representations))),))
-    elif representations:
-        r, = representations
-    else:
-        r = "auto"
+        error("Integral representation must be equal within each sub domain or 'auto', got %s." % (str(sorted(str(v) for v in representations)),))
+    if len(optimize_values) > 1:
+        error("Integral 'optimize' metadata must be equal within each sub domain or not set, got %s." % (str(sorted(str(v) for v in optimize_values)),))
+    if len(precision_values) > 1:
+        error("Integral 'precision' metadata must be equal within each sub domain or not set, got %s." % (str(sorted(str(v) for v in precision_values)),))
+
+    # The one and only non-auto representation found, or get from parameters
+    r, = representations  or (parameters["representation"],)
+    o, = optimize_values  or (parameters["optimize"],)
+    # FIXME: Default param value is zero which is not interpreted well by tsfc!
+    p, = precision_values or (parameters["precision"],)
 
     # If it's still auto, try to determine which representation is best for these integrals
     if r == "auto":
-        rs = set()
-        for integral in ida.integrals:
-            rs.add(_auto_select_representation(integral,
-                                               form_data.unique_sub_elements,
-                                               form_data.function_replace_map))
-        # If any failed to work with tensor, don't use tensor
-        if "tensor" in rs and len(rs) > 1:
-            rs.remove("tensor")
-        # The end result must be unique
-        if len(rs) != 1:
-            error("Failed to auto-select representation, rs=%s." % (str(list(rs)),))
-        r, = rs
+        # Find representations compatible with these integrals
+        compatible = _find_compatible_representations(ida.integrals,
+                                                      form_data.unique_sub_elements)
+        # Pick the one compatible or default to uflacs
+        if len(compatible) == 0:
+            error("Found no representation capable of compiling this form.")
+        elif len(compatible) == 1:
+            r, = compatible
+        else:
+            # NOTE: Need to pick the same default as in _extract_representation_family
+            if form_r_family == "uflacs":
+                r = "uflacs"
+            elif form_r_family == "tsfc":
+                r = "tsfc"
+            elif form_r_family == "legacy":
+                if "tensor" not in compatible:
+                    r = "quadrature"
+                elif "quadrature" not in compatible:
+                    r = "tensor"
+                else:
+                    r = "tensor"
+                    # Use quadrature if tensor representation is not possible or too costly
+                    for integral in ida.integrals:
+                        tensor_cost = estimate_cost(integral, form_data.function_replace_map)
+                        debug("Estimated cost of tensor representation: " + str(tensor_cost))
+                        if tensor_cost == -1 or tensor_cost > 3:
+                            r = "quadrature"
+                            break
+            else:
+                error("Invalid form representation family %s." % (form_r_family,))
         info("representation:    auto --> %s" % r)
     else:
         info("representation:    %s" % r)
 
-    return r
+    if p is None:
+        p = default_precision
+
+    # Hack to override representation with environment variable
+    forced_r = os.environ.get("FFC_FORCE_REPRESENTATION")
+    if forced_r:
+        r = forced_r
+        warning("representation:    forced by $FFC_FORCE_REPRESENTATION to '%s'" % r)
+        return r, o, p
 
+    return r, o, p
 
-def _attach_integral_metadata(form_data, parameters):
+
+def _attach_integral_metadata(form_data, form_r_family, parameters):
     "Attach integral metadata"
     # TODO: A nicer data flow would avoid modifying the form_data at all.
 
-    # Recognized metadata keys
-    metadata_keys = ("representation", "quadrature_degree", "quadrature_rule")
+    # Parameter values which make sense "per integral collection" or "per integral"
+    metadata_keys = (
+        "representation",
+        "optimize",
+        # TODO: Could have finer optimize (sub)parameters here later
+        "precision",
+        # NOTE: We don't pass precision to quadrature and tensor; it's not
+        #       worth resolving the set_float_formatting hack for the
+        #       (almost) deprecated backends
+        "quadrature_degree",
+        "quadrature_rule",
+    )
+
+    # Get defaults from parameters
     metadata_parameters = {key: parameters[key] for key in metadata_keys if key in parameters}
 
     # Iterate over integral collections
@@ -328,10 +515,14 @@ def _attach_integral_metadata(form_data, parameters):
             integral_metadatas[i].update(integral.metadata() or {})
 
         # Determine representation, must be equal for all integrals on same subdomain
-        r = _determine_representation(integral_metadatas, ida, form_data, parameters)
+        r, o, p = _determine_representation(integral_metadatas, ida, form_data, form_r_family, parameters)
         for i, integral in enumerate(ida.integrals):
             integral_metadatas[i]["representation"] = r
+            integral_metadatas[i]["optimize"] = o
+            integral_metadatas[i]["precision"] = p
         ida.metadata["representation"] = r
+        ida.metadata["optimize"] = o
+        ida.metadata["precision"] = p
 
         # Determine automated updates to metadata values
         for i, integral in enumerate(ida.integrals):
@@ -370,7 +561,7 @@ def _attach_integral_metadata(form_data, parameters):
     _validate_quadrature_schemes_of_elements(quad_schemes, form_data.unique_sub_elements)
 
 
-def _validate_quadrature_schemes_of_elements(quad_schemes, elements):  # form_data):
+def _validate_quadrature_schemes_of_elements(quad_schemes, elements):
     # Update scheme for QuadratureElements
     if quad_schemes and all_equal(quad_schemes):
         scheme = quad_schemes[0]
@@ -396,7 +587,7 @@ def _get_sub_elements(element):
     return sub_elements
 
 
-def _auto_select_representation(integral, elements, function_replace_map):
+def _find_compatible_representations(integrals, elements):
     """
     Automatically select a suitable representation for integral.
     Note that the selection is made for each integral, not for
@@ -404,10 +595,21 @@ def _auto_select_representation(integral, elements, function_replace_map):
     into the same integral (if their measures are equal) will
     necessarily get the same representation.
     """
+    # All representations
+    compatible = set(("uflacs", "quadrature", "tensor", "tsfc"))
+
+    # Check for non-affine meshes
+    if _has_higher_order_geometry(integrals):
+        compatible &= set(("uflacs", "tsfc"))
+
+    # Custom integrals
+    if _has_custom_integrals(integrals):
+        compatible &= set(("quadrature",))
 
-    # Skip unsupported integration domain types
-    if integral.integral_type() == "vertex":
-        return "quadrature"
+    # Use quadrature for vertex integrals
+    if any(integral.integral_type() == "vertex" for integral in integrals):
+        # TODO: Test with uflacs, I think this works fine now:
+        compatible &= set(("quadrature", "uflacs", "tsfc"))
 
     # Get ALL sub elements, needed to check for restrictions of EnrichedElements.
     sub_elements = []
@@ -416,18 +618,7 @@ def _auto_select_representation(integral, elements, function_replace_map):
 
     # Use quadrature representation if we have a quadrature element
     if any(e.family() == "Quadrature" for e in sub_elements):
-        return "quadrature"
+        # TODO: Test with uflacs, might need a little adjustment:
+        compatible &= set(("quadrature", "uflacs", "tsfc"))
 
-    # Estimate cost of tensor representation
-    tensor_cost = estimate_cost(integral, function_replace_map)
-    debug("Estimated cost of tensor representation: " + str(tensor_cost))
-
-    # Use quadrature if tensor representation is not possible
-    if tensor_cost == -1:
-        return "quadrature"
-
-    # Otherwise, select quadrature when cost is high
-    if tensor_cost <= 3:
-        return "tensor"
-    else:
-        return "quadrature"
+    return compatible
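
Taken together, stage 1 can now be driven with the representation fixed either
per integral (via integral metadata) or globally (via parameters). A hedged
sketch of the global route, assuming the default parameters are otherwise
adequate:

    from ufl import FiniteElement, TestFunction, TrialFunction, dx, triangle
    from ffc.analysis import analyze_forms
    from ffc.parameters import default_parameters

    element = FiniteElement("Lagrange", triangle, 1)
    u = TrialFunction(element)
    v = TestFunction(element)
    a = u*v*dx

    parameters = default_parameters()
    parameters["representation"] = "uflacs"  # or "quadrature", "tensor", "tsfc"
    # Returns analyzed form data; exact tuple shape per analyze_forms above.
    analysis = analyze_forms([a], parameters)
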
diff --git a/ffc/backends/ufc/__init__.py b/ffc/backends/ufc/__init__.py
index 9443e04..9ab2273 100644
--- a/ffc/backends/ufc/__init__.py
+++ b/ffc/backends/ufc/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-"""Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
+"""Code generation format strings for UFC (Unified Form-assembly Code) version 2017.1.0
 
 Five format strings are defined for each of the following UFC classes:
 
@@ -44,8 +44,8 @@ For more information about UFC and the FEniCS Project, visit
 """
 
 __author__ = "Martin Sandve Alnæs, Anders Logg, Kent-Andre Mardal, Ola Skavhaug, and Hans Petter Langtangen"
-__date__ = "2016-11-30"
-__version__ = "2016.2.0"
+__date__ = "2017-05-09"
+__version__ = "2017.1.0"
 __license__ = "This code is released into the public domain"
 
 import os
diff --git a/ffc/backends/ufc/coordinate_mapping.py b/ffc/backends/ufc/coordinate_mapping.py
index 12296c0..b06415d 100644
--- a/ffc/backends/ufc/coordinate_mapping.py
+++ b/ffc/backends/ufc/coordinate_mapping.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
-# Code generation format strings for UFC (Unified Form-assembly Code) v. 2016.1.0dev.
+# Code generation format strings for UFC (Unified Form-assembly Code) v. 2017.1.0
 # This code is released into the public domain.
 #
-# The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
+# The FEniCS Project (http://www.fenicsproject.org/) 2006-2017
 
 coordinate_mapping_header = """
 class %(classname)s: public ufc::coordinate_mapping
diff --git a/ffc/backends/ufc/dofmap.py b/ffc/backends/ufc/dofmap.py
index 6215165..a3d1365 100644
--- a/ffc/backends/ufc/dofmap.py
+++ b/ffc/backends/ufc/dofmap.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
-# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2017.1.0
 # This code is released into the public domain.
 #
-# The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
+# The FEniCS Project (http://www.fenicsproject.org/) 2006-2017.
 
 dofmap_combined = """
 class %(classname)s: public ufc::dofmap
diff --git a/ffc/backends/ufc/finite_element.py b/ffc/backends/ufc/finite_element.py
index f928e46..b21e4b1 100644
--- a/ffc/backends/ufc/finite_element.py
+++ b/ffc/backends/ufc/finite_element.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
-# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2017.1.0
 # This code is released into the public domain.
 #
-# The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
+# The FEniCS Project (http://www.fenicsproject.org/) 2006-2017.
 
 finite_element_combined = """
 class %(classname)s: public ufc::finite_element
diff --git a/ffc/backends/ufc/form.py b/ffc/backends/ufc/form.py
index 0485340..e108e89 100644
--- a/ffc/backends/ufc/form.py
+++ b/ffc/backends/ufc/form.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
-# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2017.1.0
 # This code is released into the public domain.
 #
-# The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
+# The FEniCS Project (http://www.fenicsproject.org/) 2006-2017.
 
 form_combined = """
 class %(classname)s: public ufc::form
diff --git a/ffc/backends/ufc/function.py b/ffc/backends/ufc/function.py
index 721cd2b..b35cb68 100644
--- a/ffc/backends/ufc/function.py
+++ b/ffc/backends/ufc/function.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
-# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2017.1.0
 # This code is released into the public domain.
 #
-# The FEniCS Project (http://www.fenicsproject.org/) 2006-2016
+# The FEniCS Project (http://www.fenicsproject.org/) 2006-2017
 
 function_combined = """
 class %(classname)s: public ufc::function
diff --git a/ffc/backends/ufc/integrals.py b/ffc/backends/ufc/integrals.py
index 3d064f8..417bf6c 100644
--- a/ffc/backends/ufc/integrals.py
+++ b/ffc/backends/ufc/integrals.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
-# Code generation format strings for UFC (Unified Form-assembly Code) version 2016.2.0
+# Code generation format strings for UFC (Unified Form-assembly Code) version 2017.1.0
 # This code is released into the public domain.
 #
-# The FEniCS Project (http://www.fenicsproject.org/) 2006-2016
+# The FEniCS Project (http://www.fenicsproject.org/) 2006-2017
 
 cell_integral_combined = """
 class %(classname)s: public ufc::cell_integral
diff --git a/ffc/backends/ufc/ufc.h b/ffc/backends/ufc/ufc.h
index ce70476..4ff37d5 100644
--- a/ffc/backends/ufc/ufc.h
+++ b/ffc/backends/ufc/ufc.h
@@ -1,13 +1,13 @@
-// This is UFC (Unified Form-assembly Code) v. 2016.2.0
+// This is UFC (Unified Form-assembly Code) v. 2017.1.0
 // This code is released into the public domain.
 //
-// The FEniCS Project (http://www.fenicsproject.org/) 2006-2016.
+// The FEniCS Project (http://www.fenicsproject.org/) 2006-2017.
 
 #ifndef __UFC_H
 #define __UFC_H
 
-#define UFC_VERSION_MAJOR 2016
-#define UFC_VERSION_MINOR 2
+#define UFC_VERSION_MAJOR 2017
+#define UFC_VERSION_MINOR 1
 #define UFC_VERSION_MAINTENANCE 0
 #define UFC_VERSION_RELEASE 1
 
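
Downstream code can use these macros to guard against header mismatches; from
Python the same information is available through helpers that this tree
already imports elsewhere:

    from ffc import get_ufc_signature
    from ffc.backends.ufc import get_include_path

    print(get_ufc_signature())  # SHA-1 hash of the installed ufc.h
    print(get_include_path())   # include path holding ufc.h
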
diff --git a/ffc/codegeneration.py b/ffc/codegeneration.py
index a7a3386..bd93ab4 100644
--- a/ffc/codegeneration.py
+++ b/ffc/codegeneration.py
@@ -57,7 +57,7 @@ def generate_code(ir, parameters):
 
     # FIXME: This has global side effects
     # Set code generation parameters
-    set_float_formatting(int(parameters["precision"]))
+    set_float_formatting(parameters["precision"])
     set_exception_handling(parameters["convert_exceptions_to_warnings"])
 
     # Extract representations
@@ -309,6 +309,64 @@ def _generate_coordinate_mapping_code(ir, parameters):
     return code
 
 
+tt_timing_template = """
+    // Initialize timing variables
+    static const std::size_t _tperiod = 10000;
+    static std::size_t _tcount = 0;
+    static auto _tsum = std::chrono::nanoseconds::zero();
+    static auto _tavg_best = std::chrono::nanoseconds::max();
+    static auto _tmin = std::chrono::nanoseconds::max();
+    static auto _tmax = std::chrono::nanoseconds::min();
+
+    // Measure single kernel time
+    auto _before = std::chrono::high_resolution_clock::now();
+    { // Begin original kernel
+%s
+    } // End original kernel
+    // Measure single kernel time
+    auto _after = std::chrono::high_resolution_clock::now();
+
+    // Update time stats
+    const std::chrono::seconds _s(1);
+    auto _tsingle = _after - _before;
+    ++_tcount;
+    _tsum += _tsingle;
+    _tmin = std::min(_tmin, _tsingle);
+    _tmax = std::max(_tmax, _tsingle);
+
+    if (_tcount %% _tperiod == 0 || _tsum > _s)
+    {
+        // Record best average across batches
+        std::chrono::nanoseconds _tavg = _tsum / _tcount;
+        if (_tavg_best > _tavg)
+            _tavg_best = _tavg;
+
+        // Convert to ns
+        auto _tot_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(_tsum).count();
+        auto _avg_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(_tavg).count();
+        auto _min_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(_tmin).count();
+        auto _max_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(_tmax).count();
+        auto _avg_best_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(_tavg_best).count();
+
+        // Print report
+        std::cout << "FFC tt time:"
+                  << "  avg_best = " << _avg_best_ns << " ns,"
+                  << "  avg = " << _avg_ns << " ns,"
+                  << "  min = " << _min_ns << " ns,"
+                  << "  max = " << _max_ns << " ns,"
+                  << "  tot = " << _tot_ns << " ns,"
+                  << "  n = " << _tcount
+                  << std::endl;
+
+        // Reset statistics for next batch
+        _tcount = 0;
+        _tsum = std::chrono::nanoseconds(0);
+        _tmin = std::chrono::nanoseconds::max();
+        _tmax = std::chrono::nanoseconds::min();
+    }
+"""
+
+
 def _generate_integral_code(ir, parameters):
     "Generate code for integrals from intermediate representation."
 
@@ -323,6 +381,17 @@ def _generate_integral_code(ir, parameters):
     # TODO: Drop prefix argument and get from ir:
     code = r.generate_integral_code(ir, ir["prefix"], parameters)
 
+    # Hack for benchmarking overhead in assembler with empty tabulate_tensor
+    if parameters["generate_dummy_tabulate_tensor"]:
+        code["tabulate_tensor"] = ""
+
+    # Wrapping tabulate_tensor in a timing snippet for benchmarking
+    if parameters["add_tabulate_tensor_timing"]:
+        code["tabulate_tensor"] = tt_timing_template % code["tabulate_tensor"]
+        code["additional_includes_set"] = code.get("additional_includes_set", set())
+        code["additional_includes_set"].add("#include <chrono>")
+        code["additional_includes_set"].add("#include <iostream>")
+
     # Generate comment
     code["tabulate_tensor_comment"] = _generate_tabulate_tensor_comment(ir, parameters)
 
diff --git a/ffc/codesnippets.py b/ffc/codesnippets.py
index 0327614..3862d3a 100644
--- a/ffc/codesnippets.py
+++ b/ffc/codesnippets.py
@@ -549,6 +549,7 @@ for (unsigned int row = 0; row < %%(num_derivatives)s%(g)s; row++)
 
     return snippet
 
+
 # Codesnippets used in evaluate_dof
 _map_onto_physical_1D = """\
 // Evaluate basis functions for affine mapping
diff --git a/ffc/cpp.py b/ffc/cpp.py
index 49e470f..9a5fa94 100644
--- a/ffc/cpp.py
+++ b/ffc/cpp.py
@@ -27,13 +27,18 @@ import re
 import numpy
 import platform
 from six import string_types
+from six.moves import zip
 
 # UFL modules
 from ufl import custom_integral_types
 
 # FFC modules
 from ffc.log import debug, error
-from six.moves import zip
+
+
+# Default precision for formatting floats
+default_precision = numpy.finfo("double").precision + 1  # == 16
+
 
 # ufc class names
 
@@ -691,6 +696,10 @@ def count_ops(code):
 def set_float_formatting(precision):
     "Set floating point formatting based on precision."
 
+    # Set default if not set
+    if precision is None:
+        precision = default_precision
+
     # Options for float formatting
     # f1     = "%%.%df" % precision
     # f2     = "%%.%de" % precision
@@ -726,8 +735,8 @@ def set_float_formatting(precision):
     format["epsilon"] = 10.0 * eval("1e-%s" % precision)
 
     # Hack to propagate precision to uflacs internals...
-    import ffc.uflacs.language.format_value
-    ffc.uflacs.language.format_value.set_float_precision(precision)
+    #import ffc.uflacs.language.format_value
+    #ffc.uflacs.language.format_value.set_float_precision(precision)
 
 
 def set_exception_handling(convert_exceptions_to_warnings):
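
The new default_precision above comes straight from IEEE double precision;
for reference:

    import numpy

    # float64 carries 15 reliably-representable decimal digits; FFC keeps
    # one extra digit when formatting, giving the default of 16.
    assert numpy.finfo("double").precision == 15
    assert numpy.finfo("double").precision + 1 == 16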
diff --git a/ffc/fiatinterface.py b/ffc/fiatinterface.py
index 9df6f06..f52f514 100644
--- a/ffc/fiatinterface.py
+++ b/ffc/fiatinterface.py
@@ -30,13 +30,11 @@ from numpy import array
 # UFL and FIAT modules
 import ufl
 from ufl.utils.sorting import sorted_by_key
-
 import FIAT
 from FIAT.hdiv_trace import HDivTrace
 
 # FFC modules
 from ffc.log import debug, error
-
 from ffc.mixedelement import MixedElement
 from ffc.restrictedelement import RestrictedElement
 from ffc.enrichedelement import EnrichedElement, SpaceOfReals
@@ -342,5 +340,6 @@ def _indices(element, restriction_domain, tdim):
             indices += index
     return indices
 
+
 # Import FFC module with circular dependency
 from ffc.quadratureelement import QuadratureElement
diff --git a/ffc/jitcompiler.py b/ffc/jitcompiler.py
index 0ef7415..fd561b9 100644
--- a/ffc/jitcompiler.py
+++ b/ffc/jitcompiler.py
@@ -145,11 +145,17 @@ def compute_jit_prefix(ufl_object, parameters, kind=None):
     # Compute deterministic string of relevant parameters
     parameters_signature = compute_jit_parameters_signature(parameters)
 
+    # Increase this number at any time to invalidate cache
+    # signatures if code generation has changed in important
+    # ways without the change being visible in regular signatures:
+    jit_version_bump = 3
+
     # Build combined signature
     signatures = [
         object_signature,
         parameters_signature,
         str(FFC_VERSION),
+        str(jit_version_bump),
         get_ufc_signature(),
         get_ufc_templates_signature(),
         kind,
diff --git a/ffc/log.py b/ffc/log.py
index 150c2c1..c868191 100644
--- a/ffc/log.py
+++ b/ffc/log.py
@@ -39,13 +39,13 @@ ffc_logger = Logger("FFC")
 for foo in log_functions:
     exec("%s = lambda *message : ffc_logger.%s(*message)" % (foo, foo))
 
-# Assertion, copied from UFL
-
 
+# Assertion, copied from UFL
 def ffc_assert(condition, *message):
     "Assert that condition is true and otherwise issue an error with given message."
     condition or error(*message)
 
+
 # Set default log level
 set_level(INFO)
 
diff --git a/ffc/__main__.py b/ffc/main.py
similarity index 86%
copy from ffc/__main__.py
copy to ffc/main.py
index 1533e5d..b153766 100644
--- a/ffc/__main__.py
+++ b/ffc/main.py
@@ -1,9 +1,10 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# This script is the command-line interface to FFC. It parses
-# command-line arguments and wraps the given form file code in a
-# Python module which is then executed.
+"""This script is the command-line interface to FFC.
+
+It parses command-line arguments and generates code from input UFL form files.
+"""
 
 # Copyright (C) 2004-2016 Anders Logg
 #
@@ -26,6 +27,7 @@
 # Modified by Ola Skavhaug, 2006.
 # Modified by Dag Lindbo, 2008.
 # Modified by Kristian B. Oelgaard 2010.
+# Modified by Martin Sandve Alnæs 2017.
 
 from __future__ import print_function
 
@@ -46,7 +48,7 @@ from ufl.algorithms import load_ufl_file
 import ufl
 
 # FFC modules.
-from ffc.log import push_level, pop_level
+from ffc.log import push_level, pop_level, set_indent, ffc_logger
 from ffc.log import DEBUG, INFO, WARNING, ERROR
 from ffc.parameters import default_parameters
 from ffc import __version__ as FFC_VERSION, get_ufc_signature
@@ -68,7 +70,10 @@ def info_version():
 This is FFC, the FEniCS Form Compiler, version {0}.
 UFC backend version {1}, signature {2}.
 For further information, visit https://bitbucket.org/fenics-project/ffc/.
-""".format(FFC_VERSION, UFC_VERSION, get_ufc_signature()))
+
+Python {3} on {4}
+""".format(FFC_VERSION, UFC_VERSION, get_ufc_signature(),
+           sys.version, sys.platform))
 
 
 def info_usage():
@@ -105,9 +110,11 @@ def main(args=None):
 
     # Get command-line arguments
     try:
-        opts, args = getopt.getopt(args, "hIVSdvsl:r:f:Oo:q:ep",
+        if "-O" in args:
+            args[args.index("-O")] = "-O2"
+        opts, args = getopt.getopt(args, "hIVSdvsl:r:f:O:o:q:ep",
                                    ["help", "includes", "version", "signature", "debug", "verbose", "silent",
-                                    "language=", "representation=", "optimize",
+                                    "language=", "representation=", "optimize=",
                                     "output-directory=", "quadrature-rule=", "error-control",
                                     "profile"])
     except getopt.GetoptError:
@@ -166,10 +173,7 @@ def main(args=None):
         elif opt == "-f":
             if len(arg.split("=")) == 2:
                 (key, value) = arg.split("=")
-                if key not in parameters:
-                    info_usage()
-                    return 1
-                default = parameters[key]
+                default = parameters.get(key)
                 if isinstance(default, int):
                     value = int(value)
                 elif isinstance(default, float):
@@ -177,12 +181,17 @@ def main(args=None):
                 parameters[key] = value
             elif len(arg.split("==")) == 1:
                 key = arg.split("=")[0]
-                parameters[arg] = True
+                if key.startswith("no-"):
+                    key = key[3:]
+                    value = False
+                else:
+                    value = True
+                parameters[key] = value
             else:
                 info_usage()
                 return 1
         elif opt in ("-O", "--optimize"):
-            parameters["optimize"] = True
+            parameters["optimize"] = bool(int(arg))
         elif opt in ("-o", "--output-directory"):
             parameters["output_dir"] = arg
         elif opt in ("-e", "--error-control"):
@@ -193,8 +202,9 @@ def main(args=None):
     # Set log_level
     push_level(parameters["log_level"])
 
+    # FIXME: This is terrible!
     # Set UFL precision
-    ufl.constantvalue.precision = int(parameters["precision"])
+    #ufl.constantvalue.precision = int(parameters["precision"])
 
     # Print a versioning message if verbose output was requested
     if parameters["log_level"] <= INFO:
@@ -202,6 +212,19 @@ def main(args=None):
 
     # Call parser and compiler for each file
     resultcode = 0
+    init_indent = ffc_logger._indent_level
+    try:
+        resultcode = _compile_files(args, parameters, enable_profile)
+    finally:
+        # Reset logging level and indent
+        pop_level()
+        set_indent(init_indent)
+
+    return resultcode
+
+
+def _compile_files(args, parameters, enable_profile):
+    # Call parser and compiler for each file
     for filename in args:
 
         # Get filename prefix and suffix
@@ -211,8 +234,7 @@ def main(args=None):
         # Check file suffix
         if suffix != "ufl":
             print_error("Expecting a UFL form file (.ufl).")
-            resultcode = 1
-            break
+            return 1
 
         # Remove weird characters (file system allows more than the C
         # preprocessor)
@@ -253,11 +275,4 @@ def main(args=None):
             pr.dump_stats(pfn)
             print("Wrote profiling info to file {0}".format(pfn))
 
-    # Reset logging level
-    pop_level()
-
-    return resultcode
-
-
-if __name__ == "__main__":
-    sys.exit(main())
+    return 0
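
For reference, the revised option handling can be exercised directly
through main(); a sketch (Poisson.ufl is a placeholder input file):

    from ffc.main import main

    main(["-O0", "Poisson.ufl"])         # disable optimization (now on by default)
    main(["-O", "Poisson.ufl"])          # bare -O is rewritten to -O2, i.e. enabled
    main(["-fsplit", "Poisson.ufl"])     # -f<flag> sets parameters["split"] = True
    main(["-fno-split", "Poisson.ufl"])  # -fno-<flag> sets it to False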
diff --git a/ffc/optimization.py b/ffc/optimization.py
index 62c1048..3c254e4 100644
--- a/ffc/optimization.py
+++ b/ffc/optimization.py
@@ -37,17 +37,18 @@ def optimize_ir(ir, parameters):
 
     begin("Compiler stage 3: Optimizing intermediate representation")
 
-    # Check if optimization is requested
-    if not parameters["optimize"]:
-        info("Skipping optimizations, add -O to optimize")
-        end()
-        return ir
-
     # Extract representations
     ir_elements, ir_dofmaps, ir_coordinate_mappings, ir_integrals, ir_forms = ir
 
-    # Iterate over integrals
-    oir_integrals = [_optimize_integral_ir(ir, parameters) for ir in ir_integrals]
+    # Check if optimization is requested
+    if not any(ir["integrals_metadata"]["optimize"] for ir in ir_integrals):
+        info(r"Skipping optimizations, add -O or attach {'optimize': True} "
+             "metadata to integrals")
+
+    # Run the optimizer on every bunch of integrals which are compiled together
+    oir_integrals = [_optimize_integral_ir(ir, parameters)
+                     if ir["integrals_metadata"]["optimize"] else ir
+                     for ir in ir_integrals]
 
     end()
 
@@ -56,13 +57,5 @@ def optimize_ir(ir, parameters):
 
 def _optimize_integral_ir(ir, parameters):
     "Compute optimized intermediate represention of integral."
-
-    # Select representation
     r = pick_representation(ir["representation"])
-
-    # Optimize representation (if available for representation)
-    try:
-        oir = r.optimize_integral_ir(ir, parameters)
-        return oir
-    except:
-        return ir
+    return r.optimize_integral_ir(ir, parameters)
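
With optimization now decided per integral, the metadata route mentioned
in the info message looks roughly like this in a .ufl file (a sketch,
assuming UFL's measure metadata argument):

    element = FiniteElement("Lagrange", triangle, 1)
    u = TrialFunction(element)
    v = TestFunction(element)

    # Request optimization for this particular integral only
    a = inner(grad(u), grad(v))*dx(metadata={"optimize": True})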
diff --git a/ffc/parameters.py b/ffc/parameters.py
index 72b346d..4bbf2c7 100644
--- a/ffc/parameters.py
+++ b/ffc/parameters.py
@@ -17,12 +17,12 @@
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
 import os
+import copy
 from ffc.log import INFO
 
 
 # Comments from other places in code:
 # FIXME: Document option -fconvert_exceptions_to_warnings
-# FIXME: Remove option epsilon and just rely on precision?
 
 # NB! Parameters in the generate and build sets are
 # included in jit signature, cache and log are not.
@@ -30,14 +30,20 @@ _FFC_GENERATE_PARAMETERS = {
     "format": "ufc",           # code generation format
     "representation": "auto",  # form representation / code
                                # generation strategy
-    "quadrature_rule": "auto", # quadrature rule used for
+    "quadrature_rule": None,   # quadrature rule used for
                                # integration of element tensors
-    "quadrature_degree": -1,   # quadrature degree used for
+                               # (None is auto)
+    "quadrature_degree": None, # quadrature degree used for
                                # computing integrals
-    "precision": 15,           # precision used when writing
-                               # numbers
+                               # (None is auto)
+    "precision": None,         # precision used when writing
+                               # numbers (None for max precision)
     "epsilon": 1e-14,          # machine precision, used for
                                # dropping zero terms
+                               # (tensor repr)
+                               # FIXME: Remove option epsilon and just rely on
+                               # precision?  Seems that this can be done once
+                               # tensor repr is removed
     "split": False,            # split generated code into .h and
                                # .cpp file
     "form_postfix": True,      # postfix form name with "Function",
@@ -45,12 +51,14 @@ _FFC_GENERATE_PARAMETERS = {
     "convert_exceptions_to_warnings": False,   # convert all exceptions to warning
                                                # in generated code
     "error_control": False,   # with error control
-    "optimize": False,        # optimise the code generation
+    "optimize": True,         # turn on optimization for code generation
     "max_signature_length": 0,  # set to positive integer to shorten signatures
+    "generate_dummy_tabulate_tensor": False,  # set to True to replace tabulate_tensor body with no-op
+    "add_tabulate_tensor_timing": False,      # set to True to add timing inside tabulate_tensor
 }
 _FFC_BUILD_PARAMETERS = {
-    "cpp_optimize": True,          # optimization for the JIT compiler
-    "cpp_optimize_flags": "-O2",   # optimization flags for the JIT compiler
+    "cpp_optimize": True,          # optimization for the C++ compiler
+    "cpp_optimize_flags": "-O2",   # optimization flags for the C++ compiler
 }
 _FFC_CACHE_PARAMETERS = {
     "cache_dir": "",        # cache dir used by Instant
@@ -83,7 +91,7 @@ def split_parameters(parameters):
 
 def default_parameters():
     "Return (a copy of) the default parameter values for FFC."
-    parameters = FFC_PARAMETERS.copy()
+    parameters = copy.deepcopy(FFC_PARAMETERS)
 
     # HACK
     r = os.environ.get("FFC_FORCE_REPRESENTATION")
@@ -98,7 +106,7 @@ def default_jit_parameters():
 
     # TODO: This is not in the above parameters dict.
     #       There are other parameters like this.
-    #       This is confusing, which parameters are available? What ar the defaults?
+    #       This is confusing: which parameters are available? What are the defaults?
     # Skip evaluation of basis derivatives in elements by default because it's costly
     # FIXME: Make this False when we have elements generated once instead of for each form
     parameters["no-evaluate_basis_derivatives"] = True
@@ -114,6 +122,9 @@ def validate_parameters(parameters):
     p = default_parameters()
     if parameters is not None:
         p.update(parameters)
+
+    _validate_parameters(p)
+
     return p
 
 
@@ -122,9 +133,47 @@ def validate_jit_parameters(parameters):
     p = default_jit_parameters()
     if parameters is not None:
         p.update(parameters)
+
+    _validate_parameters(p)
+
     return p
 
 
+def _validate_parameters(parameters):
+    """Does some casting of parameter values in place on the
+    provided dictionary"""
+
+    # Cast int optimize flag to bool
+    if isinstance(parameters["optimize"], int):
+        parameters["optimize"] = bool(parameters["optimize"])
+
+    # Convert all legal default values to None
+    if parameters["quadrature_rule"] in ["auto", None, "None"]:
+        parameters["quadrature_rule"] = None
+
+    # Convert all legal default values to None and
+    # cast nondefaults from str to int
+    if parameters["quadrature_degree"] in ["auto", -1, None, "None"]:
+        parameters["quadrature_degree"] = None
+    else:
+        try:
+            parameters["quadrature_degree"] = int(parameters["quadrature_degree"])
+        except Exception:
+            error("Failed to convert quadrature degree '%s' to int"
+                  % parameters.get("quadrature_degree"))
+
+    # Convert all legal default values to None and
+    # cast nondefaults from str to int
+    if parameters["precision"] in ["auto", None, "None"]:
+        parameters["precision"] = None
+    else:
+        try:
+            parameters["precision"] = int(parameters["precision"])
+        except Exception:
+            error("Failed to convert precision '%s' to int"
+                  % parameters.get("precision"))
+
+
 def compilation_relevant_parameters(parameters):
     p = parameters.copy()
     for k in _FFC_LOG_PARAMETERS:
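
The net effect of _validate_parameters on the various "auto" spellings,
in brief:

    from ffc.parameters import validate_parameters

    p = validate_parameters({"quadrature_rule": "auto",
                             "quadrature_degree": "auto",
                             "precision": "12"})
    assert p["quadrature_rule"] is None     # every "auto" spelling becomes None
    assert p["quadrature_degree"] is None
    assert p["precision"] == 12             # non-defaults are cast from str to int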
diff --git a/ffc/quadrature/__init__.py b/ffc/quadrature/__init__.py
index 0757341..5d3e414 100644
--- a/ffc/quadrature/__init__.py
+++ b/ffc/quadrature/__init__.py
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
-from .quadraturerepresentation import compute_integral_ir
-from .quadratureoptimization import optimize_integral_ir
-from .quadraturegenerator import generate_integral_code
+from ffc.quadrature.quadraturerepresentation import compute_integral_ir
+from ffc.quadrature.quadratureoptimization import optimize_integral_ir
+from ffc.quadrature.quadraturegenerator import generate_integral_code
diff --git a/ffc/quadrature/optimisedquadraturetransformer.py b/ffc/quadrature/optimisedquadraturetransformer.py
index 965017d..a5a7481 100644
--- a/ffc/quadrature/optimisedquadraturetransformer.py
+++ b/ffc/quadrature/optimisedquadraturetransformer.py
@@ -27,7 +27,7 @@ from six import advance_iterator as next
 
 # UFL common.
 from ufl.utils.sorting import sorted_by_key
-from ufl import custom_integral_types
+from ufl.measure import custom_integral_types, point_integral_types
 
 # UFL Classes.
 from ufl.classes import IntValue
@@ -773,7 +773,7 @@ class QuadratureTransformerOpt(QuadratureTransformerBase):
         weight = self._create_symbol(weight, ACCESS)[()]
 
         # Create value.
-        if integral_type in (("vertex",) + custom_integral_types):
+        if integral_type in (point_integral_types + custom_integral_types):
             trans_set = set()
             value = create_product([val, weight])
         else:
diff --git a/ffc/quadrature/parameters.py b/ffc/quadrature/parameters.py
index 21dd180..519ae1a 100644
--- a/ffc/quadrature/parameters.py
+++ b/ffc/quadrature/parameters.py
@@ -19,7 +19,7 @@
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 #
 # Modified by Anders Logg 2009, 2014
-# Modified by Martin Sandve Alnæs 2013-2014
+# Modified by Martin Sandve Alnæs 2013-2017
 
 # UFL modules
 from ufl import custom_integral_types
@@ -28,18 +28,25 @@ from ufl import custom_integral_types
 from ffc.log import warning
 
 
+def default_optimize_parameters():
+    return {
+        "eliminate zeros": False,
+        "optimisation": False,
+        "ignore ones": False,
+        "remove zero terms": False,
+        "ignore zero tables": False,
+        }
+
+
 def parse_optimise_parameters(parameters, itg_data):
 
     # Initialize parameters
-    optimise_parameters = {"eliminate zeros": False,
-                           "optimisation": False,
-                           "ignore ones": False,
-                           "remove zero terms": False,
-                           "ignore zero tables": False}
+    optimise_parameters = default_optimize_parameters()
 
     # Set optimized parameters
     if parameters["optimize"] and itg_data.integral_type in custom_integral_types:
         warning("Optimization not available for custom integrals, skipping optimization.")
+
     elif parameters["optimize"]:
         optimise_parameters["ignore ones"] = True
         optimise_parameters["remove zero terms"] = True
diff --git a/ffc/quadrature/quadraturegenerator.py b/ffc/quadrature/quadraturegenerator.py
index fa6abd0..0c463c6 100644
--- a/ffc/quadrature/quadraturegenerator.py
+++ b/ffc/quadrature/quadraturegenerator.py
@@ -53,6 +53,11 @@ def generate_integral_code(ir, prefix, parameters):
     code["tabulate_tensor"] = _tabulate_tensor(ir, prefix, parameters)
     code["additional_includes_set"] = ir["additional_includes_set"]
 
+    precision = ir["integrals_metadata"].get("precision")
+    if precision is not None and precision != parameters["precision"]:
+        warning("Ignoring precision in integral metadata compiled "
+                "using quadrature representation. Not implemented.")
+
     return code
 
 
diff --git a/ffc/quadrature/quadraturetransformer.py b/ffc/quadrature/quadraturetransformer.py
index e65e88a..cf1bde7 100644
--- a/ffc/quadrature/quadraturetransformer.py
+++ b/ffc/quadrature/quadraturetransformer.py
@@ -28,7 +28,7 @@ from six import advance_iterator as next
 
 # UFL common.
 from ufl.utils.sorting import sorted_by_key
-from ufl import custom_integral_types
+from ufl.measure import custom_integral_types, point_integral_types
 
 # UFL Classes.
 from ufl.classes import IntValue
@@ -841,7 +841,7 @@ class QuadratureTransformer(QuadratureTransformerBase):
             weight += format["component"]("", format["integration points"])
 
         # Update sets of used variables.
-        if integral_type in (("vertex",) + custom_integral_types):
+        if integral_type in (point_integral_types + custom_integral_types):
             trans_set = set()
             value = format["mul"]([val, weight])
         else:
diff --git a/ffc/quadrature/quadraturetransformerbase.py b/ffc/quadrature/quadraturetransformerbase.py
index 1b3a875..3005110 100644
--- a/ffc/quadrature/quadraturetransformerbase.py
+++ b/ffc/quadrature/quadraturetransformerbase.py
@@ -494,6 +494,10 @@ class QuadratureTransformerBase(Transformer):
 
         if self.vertex is not None:
             error("Spatial coordinates (x) not implemented for point measure (dP)")  # TODO: Implement this, should be just the point.
+        elif self.points is None:
+            gdim, = o.ufl_shape
+            coordinate = "quadrature_points[ip*%d + %d]" % (gdim, c)
+            return self._create_symbol(coordinate, IP)
         else:
             # Generate the appropriate coordinate and update tables.
             coordinate = format["ip coordinates"](self.points, c)
diff --git a/ffc/quadrature/symbolics.py b/ffc/quadrature/symbolics.py
index 9b8cd24..295e783 100644
--- a/ffc/quadrature/symbolics.py
+++ b/ffc/quadrature/symbolics.py
@@ -46,6 +46,7 @@ def create_float(val):
     _float_cache[val] = float_val
     return float_val
 
+
 _symbol_cache = {}
 
 
@@ -57,6 +58,7 @@ def create_symbol(variable, symbol_type, base_expr=None, base_op=0):
     _symbol_cache[key] = symbol
     return symbol
 
+
 _product_cache = {}
 
 
@@ -72,6 +74,7 @@ def create_product(variables):
     _product_cache[key] = product
     return product
 
+
 _sum_cache = {}
 
 
@@ -88,6 +91,7 @@ def create_sum(variables):
     _sum_cache[key] = s
     return s
 
+
 _fraction_cache = {}
 
 
diff --git a/ffc/quadratureelement.py b/ffc/quadratureelement.py
index 092ce2f..712218a 100644
--- a/ffc/quadratureelement.py
+++ b/ffc/quadratureelement.py
@@ -26,7 +26,9 @@ import numpy
 from FIAT.functional import PointEvaluation
 
 # FFC modules.
-from .log import error, info_red
+from ffc.fiatinterface import reference_cell, create_quadrature
+from ffc.log import error, info_red
+
 
 # Default quadrature element degree
 default_quadrature_degree = 1
@@ -134,7 +136,3 @@ def _create_entity_dofs(fiat_cell, num_dofs):
             entity_dofs[dim][entity] = []
     entity_dofs[dim][0] = list(range(num_dofs))
     return entity_dofs
-
-# FFC modules to avoid circular import
-from ffc.fiatinterface import reference_cell
-from ffc.fiatinterface import create_quadrature
diff --git a/ffc/representation.py b/ffc/representation.py
index 037acf5..221f6cf 100644
--- a/ffc/representation.py
+++ b/ffc/representation.py
@@ -69,6 +69,8 @@ def pick_representation(representation):
         from ffc import tensor as r
     elif representation == "uflacs":
         from ffc import uflacs as r
+    elif representation == "tsfc":
+        from ffc import tsfc as r
     else:
         error("Unknown representation: %s" % str(representation))
     return r
@@ -133,7 +135,8 @@ def compute_ir(analysis, prefix, parameters, jit=False):
 
     # Set code generation parameters (this is not actually a 'formatting'
     # parameter, used for table value clamping as well)
-    set_float_formatting(int(parameters["precision"]))
+    # FIXME: Global state?!
+    set_float_formatting(parameters["precision"])
 
     # Extract data from analysis
     form_datas, elements, element_numbers, coordinate_elements = analysis
@@ -185,7 +188,7 @@ def compute_ir(analysis, prefix, parameters, jit=False):
     info("Computing representation of integrals")
     irs = [_compute_integral_ir(fd, form_id, prefix, element_numbers, classnames, parameters, jit)
            for (form_id, fd) in enumerate(form_datas)]
-    ir_integrals = [ir for ir in chain(*irs) if ir is not None]
+    ir_integrals = list(chain(*irs))
 
     # Compute representation of forms
     info("Computing representation of forms")
@@ -753,8 +756,12 @@ def _evaluate_basis(ufl_element, fiat_element):
 
 def _tabulate_dof_coordinates(ufl_element, element):
     "Compute intermediate representation of tabulate_dof_coordinates."
+    if uses_integral_moments(element):
+        return {}
 
-    if uses_integral_moments(element) or not element.dual_basis()[0]:
+    # Bail out if any dual basis member is missing (element is not nodal);
+    # strictly speaking this is not necessary, but it keeps things simple
+    if any(L is None for L in element.dual_basis()):
         return {}
 
     cell = ufl_element.cell()
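
Selecting the new backend goes through the same parameter as the existing
representations; a sketch (requires the external tsfc package, form
construction elided):

    from ffc import compile_form
    from ffc.parameters import default_parameters

    parameters = default_parameters()
    parameters["representation"] = "tsfc"  # routed to ffc.tsfc by pick_representation
    code = compile_form(forms, prefix="Poisson", parameters=parameters)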
diff --git a/ffc/representationutils.py b/ffc/representationutils.py
index 66569df..467b9f1 100644
--- a/ffc/representationutils.py
+++ b/ffc/representationutils.py
@@ -24,9 +24,8 @@ quadrature and tensor representation."""
 
 import numpy
 
-from ufl.measure import integral_type_to_measure_name
+from ufl.measure import integral_type_to_measure_name, point_integral_types, facet_integral_types, custom_integral_types
 from ufl.cell import cellname2facetname
-from ufl import custom_integral_types
 
 from ffc.log import error
 from ffc.cpp import make_integral_classname
@@ -40,9 +39,9 @@ def create_quadrature_points_and_weights(integral_type, cell, degree, rule):
     "Create quadrature rule and return points and weights."
     if integral_type == "cell":
         (points, weights) = create_quadrature(cell.cellname(), degree, rule)
-    elif integral_type == "exterior_facet" or integral_type == "interior_facet":
+    elif integral_type in facet_integral_types:
         (points, weights) = create_quadrature(cellname2facetname[cell.cellname()], degree, rule)
-    elif integral_type == "vertex":
+    elif integral_type in point_integral_types:
         (points, weights) = create_quadrature("vertex", degree, rule)
     elif integral_type in custom_integral_types:
         (points, weights) = (None, None)
@@ -55,9 +54,9 @@ def integral_type_to_entity_dim(integral_type, tdim):
     "Given integral_type and domain tdim, return the tdim of the integration entity."
     if integral_type == "cell":
         entity_dim = tdim
-    elif (integral_type == "exterior_facet" or integral_type == "interior_facet"):
+    elif integral_type in facet_integral_types:
         entity_dim = tdim - 1
-    elif integral_type == "vertex":
+    elif integral_type in point_integral_types:
         entity_dim = 0
     elif integral_type in custom_integral_types:
         entity_dim = tdim
diff --git a/ffc/tensor/__init__.py b/ffc/tensor/__init__.py
index aa62fe9..ad0fe4a 100644
--- a/ffc/tensor/__init__.py
+++ b/ffc/tensor/__init__.py
@@ -1,4 +1,5 @@
 # -*- coding: utf-8 -*-
-from .tensorrepresentation import compute_integral_ir
-from .tensorgenerator import generate_integral_code
-from .costestimation import estimate_cost
+from ffc.tensor.tensorrepresentation import compute_integral_ir
+from ffc.tensor.tensoroptimization import optimize_integral_ir
+from ffc.tensor.tensorgenerator import generate_integral_code
+from ffc.tensor.costestimation import estimate_cost
diff --git a/ffc/tensor/monomialtransformation.py b/ffc/tensor/monomialtransformation.py
index f2209ee..45d14eb 100644
--- a/ffc/tensor/monomialtransformation.py
+++ b/ffc/tensor/monomialtransformation.py
@@ -507,6 +507,7 @@ class TransformedMonomial:
         factors += self.transforms
         return " * ".join([str(f) for f in factors]) + " | " + " * ".join([str(v) for v in self.arguments])
 
+
 # Index counters
 _current_secondary_index = 0
 _current_internal_index = 0
diff --git a/ffc/tensor/tensorgenerator.py b/ffc/tensor/tensorgenerator.py
index d9af8bc..b20c24a 100644
--- a/ffc/tensor/tensorgenerator.py
+++ b/ffc/tensor/tensorgenerator.py
@@ -28,7 +28,7 @@
 # Last changed: 2013-02-10
 
 # FFC modules
-from ffc.log import error
+from ffc.log import error, warning
 from ffc.cpp import format, remove_unused, count_ops
 
 # FFC tensor representation modules
@@ -40,6 +40,12 @@ def generate_integral_code(ir, prefix, parameters):
     "Generate code for integral from intermediate representation."
     code = initialize_integral_code(ir, prefix, parameters)
     code["tabulate_tensor"] = _tabulate_tensor(ir, parameters)
+
+    precision = ir["integrals_metadata"].get("precision")
+    if precision is not None and precision != parameters["precision"]:
+        warning("Ignoring precision in integral metadata compiled "
+                "using tensor representation. Not implemented.")
+
     return code
 
 
diff --git a/demo/AdaptivePoisson.ufl b/ffc/tensor/tensoroptimization.py
similarity index 68%
copy from demo/AdaptivePoisson.ufl
copy to ffc/tensor/tensoroptimization.py
index 46e1794..c8362b3 100644
--- a/demo/AdaptivePoisson.ufl
+++ b/ffc/tensor/tensoroptimization.py
@@ -1,4 +1,5 @@
-# Copyright (C) 2010 Marie E. Rognes
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Jan Blechta
 #
 # This file is part of FFC.
 #
@@ -15,15 +16,10 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with FFC. If not, see <http://www.gnu.org/licenses/>.
 
-element = FiniteElement("Lagrange", triangle, 1)
-element2 = FiniteElement("Lagrange", triangle, 3)
+from ffc.log import info
 
-u = TrialFunction(element)
-v = TestFunction(element)
+def optimize_integral_ir(ir, parameters):
+    "Compute optimized intermediate representation of integral."
 
-f = Coefficient(element2)
-g = Coefficient(element)
-
-a = inner(grad(u), grad(v))*dx()
-L = f*v*dx() + g*v*ds()
-M = u*dx()
+    info("Ignoring optimization request for tensor representation")
+    return ir
diff --git a/ffc/tensor/tensorreordering.py b/ffc/tensor/tensorreordering.py
index 1fbe293..76119c3 100644
--- a/ffc/tensor/tensorreordering.py
+++ b/ffc/tensor/tensorreordering.py
@@ -45,9 +45,9 @@ def reorder_entries(terms):
         for i in range(len(restrictions)):
             dim = dims[i]
             if restrictions[i] == "+":
-                position = position + [slice(0, dim / 2)]
+                position = position + [slice(0, dim // 2)]
             elif restrictions[i] == "-":
-                position = position + [slice(dim / 2, dim)]
+                position = position + [slice(dim // 2, dim)]
             else:
                 position = position + [slice(0, dim)]
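
The switch to // matters under Python 3, where / is true division; slice
bounds must stay integral:

    dim = 6
    assert dim / 2 == 3.0   # true division yields a float on Python 3
    assert dim // 2 == 3    # floor division keeps an int, as indexing requires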
 
diff --git a/ffc/tsfc/__init__.py b/ffc/tsfc/__init__.py
new file mode 100644
index 0000000..0640b7e
--- /dev/null
+++ b/ffc/tsfc/__init__.py
@@ -0,0 +1,3 @@
+from ffc.tsfc.tsfcrepresentation import compute_integral_ir
+from ffc.tsfc.tsfcoptimization import optimize_integral_ir
+from ffc.tsfc.tsfcgenerator import generate_integral_code
diff --git a/ffc/tsfc/tsfcgenerator.py b/ffc/tsfc/tsfcgenerator.py
new file mode 100644
index 0000000..e9b94a8
--- /dev/null
+++ b/ffc/tsfc/tsfcgenerator.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2016 Jan Blechta
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+import coffee.base as coffee
+from coffee.visitors import Find
+
+from ffc.log import info
+from ffc.representationutils import initialize_integral_code
+
+
+def generate_integral_code(ir, prefix, parameters):
+    "Generate code for integral from intermediate representation."
+
+    info("Generating code from tsfc representation")
+
+    # Generate generic ffc code snippets
+    code = initialize_integral_code(ir, prefix, parameters)
+
+    # Generate tabulate_tensor body from ast
+    ast = ir["tsfc"]
+    tsfc_code = "".join(b.gencode() for b in ast.body)
+    tsfc_code = tsfc_code.replace("#pragma coffee", "//#pragma coffee") # FIXME
+    code["tabulate_tensor"] = tsfc_code
+
+    includes = set()
+    includes.update(ir.get("additional_includes_set", ()))
+    includes.update(ast.headers)
+    includes.add("#include <cstring>")  # memset
+    if any(node.funcall.symbol.startswith("boost::math::")
+           for node in Find(coffee.FunCall).visit(ast)[coffee.FunCall]):
+        includes.add("#include <boost/math/special_functions.hpp>")
+    code["additional_includes_set"] = includes
+
+    return code
diff --git a/ffc/tsfc/tsfcoptimization.py b/ffc/tsfc/tsfcoptimization.py
new file mode 100644
index 0000000..0a8c3ff
--- /dev/null
+++ b/ffc/tsfc/tsfcoptimization.py
@@ -0,0 +1,19 @@
+from ffc.log import error
+
+import traceback
+import os
+
+
+try:
+    from coffee.plan import ASTKernel
+except ImportError:
+    msg = traceback.format_exc()
+    def optimize_integral_ir(ir, parameters):
+        error("Failed to import coffee.plan.ASTKernel needed for optimized tsfc"
+              " representation; the error message was: {}{}"
+              .format(os.linesep, msg))
+else:
+    def optimize_integral_ir(ir, parameters):
+        knl = ASTKernel(ir["tsfc"])
+        knl.plan_cpu(dict(optlevel='O2'))  # TODO: optlevel from parameters
+        return ir  # AST was modified in-place
diff --git a/ffc/tsfc/tsfcrepresentation.py b/ffc/tsfc/tsfcrepresentation.py
new file mode 100644
index 0000000..2098efc
--- /dev/null
+++ b/ffc/tsfc/tsfcrepresentation.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2016 Jan Blechta
+#
+# This file is part of FFC.
+#
+# FFC is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# FFC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with FFC. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import
+
+from ffc.log import info
+from ffc.representationutils import initialize_integral_ir
+
+from tsfc.driver import compile_integral
+import tsfc.kernel_interface.ufc as ufc_interface
+
+
+def compute_integral_ir(integral_data,
+                        form_data,
+                        form_id,
+                        element_numbers,
+                        classnames,
+                        parameters):
+    "Compute intermediate represention of integral."
+
+    info("Computing tsfc representation")
+
+    # Initialise representation
+    ir = initialize_integral_ir("tsfc", integral_data, form_data, form_id)
+
+    # TSFC treats None and unset differently, so remove None values.
+    parameters = {k: v for k, v in parameters.items() if v is not None}
+
+    # TSFC's default mode is now "coffee"; keep "vanilla" here unless overridden
+    parameters.setdefault("mode", "vanilla")
+
+    # Store tsfc generated part separately
+    ir["tsfc"] = compile_integral(integral_data, form_data, None, parameters,
+                                  interface=ufc_interface)
+
+    return ir
diff --git a/ffc/uflacs/analysis/balancing.py b/ffc/uflacs/analysis/balancing.py
index 6616da2..6e639ce 100644
--- a/ffc/uflacs/analysis/balancing.py
+++ b/ffc/uflacs/analysis/balancing.py
@@ -51,8 +51,6 @@ def balance_modified_terminal(expr):
     # Build list of modifier layers
     layers = [expr]
     while not expr._ufl_is_terminal_:
-        if not expr._ufl_is_terminal_modifier_:
-            import IPython; IPython.embed()
         assert expr._ufl_is_terminal_modifier_
         expr = expr.ufl_operands[0]
         layers.append(expr)
diff --git a/ffc/uflacs/analysis/crsarray.py b/ffc/uflacs/analysis/crsarray.py
index af5b4e5..42c059d 100644
--- a/ffc/uflacs/analysis/crsarray.py
+++ b/ffc/uflacs/analysis/crsarray.py
@@ -20,6 +20,8 @@
 
 import numpy
 
+
+def sufficient_int(maxval):
+    return numpy.int16 if maxval < 2**15 else numpy.int32
+
+
 class CRSArray(object):
     """An array of variable length dense arrays.
@@ -34,7 +36,7 @@ class CRSArray(object):
     There is no column index.
     """
     def __init__(self, row_capacity, element_capacity, dtype):
-        itype = numpy.int16 if row_capacity < 2**15 else numpy.int32
+        itype = sufficient_int(element_capacity)
         self.row_offsets = numpy.zeros(row_capacity + 1, dtype=itype)
         self.data = numpy.zeros(element_capacity, dtype=dtype)
         self.num_rows = 0
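
Besides removing duplication, extracting the helper fixes the dtype being
chosen from row_capacity instead of the values actually stored; its
contract in brief:

    import numpy
    from ffc.uflacs.analysis.crsarray import sufficient_int

    assert sufficient_int(2**15 - 1) is numpy.int16  # still fits in 16 bits
    assert sufficient_int(2**15) is numpy.int32      # needs 32 bits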
diff --git a/ffc/uflacs/analysis/graph_dependencies.py b/ffc/uflacs/analysis/dependencies.py
similarity index 96%
rename from ffc/uflacs/analysis/graph_dependencies.py
rename to ffc/uflacs/analysis/dependencies.py
index 4cc04ea..95222c8 100644
--- a/ffc/uflacs/analysis/graph_dependencies.py
+++ b/ffc/uflacs/analysis/dependencies.py
@@ -22,13 +22,13 @@ import numpy
 
 from six.moves import xrange as range
 
-from ffc.uflacs.analysis.crsarray import CRSArray
+from ffc.uflacs.analysis.crsarray import CRSArray, sufficient_int
 
 
 def compute_dependencies(e2i, V, ignore_terminal_modifiers=True):
     # Use numpy int type sufficient to hold num_rows
     num_rows = len(V)
-    itype = numpy.int16 if num_rows < 2**15 else numpy.int32
+    itype = sufficient_int(num_rows)
 
     # Preallocate CRSArray matrix of sufficient capacity
     num_nonzeros = sum(len(v.ufl_operands) for v in V)
diff --git a/ffc/uflacs/analysis/factorization.py b/ffc/uflacs/analysis/factorization.py
index 22dd5ea..fb4ed30 100644
--- a/ffc/uflacs/analysis/factorization.py
+++ b/ffc/uflacs/analysis/factorization.py
@@ -35,7 +35,7 @@ from ufl.algorithms import extract_type
 
 from ffc.log import error
 
-from ffc.uflacs.analysis.graph_dependencies import compute_dependencies
+from ffc.uflacs.analysis.dependencies import compute_dependencies
 from ffc.uflacs.analysis.modified_terminals import analyse_modified_terminal, strip_modified_terminal
 
 
@@ -118,36 +118,17 @@ def add_to_fv(expr, FV, e2fi):
 noargs = {}
 
 
-def handle_modified_terminal(si, v, SV_factors, FV, e2fi, arg_indices, AV, sv2av):
-    # v is a modified terminal...
-    if si in arg_indices:
-        # ... a modified Argument
-        argkey = (si,)
-        fi = None
-
-        # Adding 1 as an expression allows avoiding special representation by representing "v" as "1*v"
-        one = add_to_fv(as_ufl(1.0), FV, e2fi)
-        factors = {argkey: one}
-
-        assert AV[sv2av[si]] == v
-    else:
-        # ... record a non-argument modified terminal
-        factors = noargs
-        fi = add_to_fv(v, FV, e2fi)
-    return fi, factors
-
-
-def handle_sum(si, v, deps, SV_factors, FV, sv2fv, e2fi):
+def handle_sum(v, si, deps, SV_factors, FV, sv2fv, e2fi):
     if len(deps) != 2:
         error("Assuming binary sum here. This can be fixed if needed.")
+
     fac0 = SV_factors[deps[0]]
     fac1 = SV_factors[deps[1]]
-
-    argkeys = sorted(set(iterkeys(fac0)) | set(iterkeys(fac1)))
+    argkeys = set(fac0) | set(fac1)
 
     if argkeys:  # f*arg + g*arg = (f+g)*arg
+        argkeys = sorted(argkeys)
         keylen = len(argkeys[0])
-        fi = None
         factors = {}
         for argkey in argkeys:
             if len(argkey) != keylen:
@@ -167,12 +148,12 @@ def handle_sum(si, v, deps, SV_factors, FV, sv2fv, e2fi):
 
     else:  # non-arg + non-arg
         factors = noargs
-        fi = add_to_fv(v, FV, e2fi)
+        sv2fv[si] = add_to_fv(v, FV, e2fi)
 
-    return fi, factors
+    return factors
 
 
-def handle_product(si, v, deps, SV_factors, FV, sv2fv, e2fi):
+def handle_product(v, si, deps, SV_factors, FV, sv2fv, e2fi):
     if len(deps) != 2:
         error("Assuming binary product here. This can be fixed if needed.")
     fac0 = SV_factors[deps[0]]
@@ -184,8 +165,8 @@ def handle_product(si, v, deps, SV_factors, FV, sv2fv, e2fi):
         f0 = FV[sv2fv[deps[0]]]
         f1 = FV[sv2fv[deps[1]]]
         assert f1 * f0 == v
-        fi = add_to_fv(v, FV, e2fi)
-        assert FV[fi] == v
+        sv2fv[si] = add_to_fv(v, FV, e2fi)
+        assert FV[sv2fv[si]] == v
 
     elif not fac0:  # non-arg * arg
         # Record products of non-arg operand with each factor of arg-dependent operand
@@ -194,7 +175,6 @@ def handle_product(si, v, deps, SV_factors, FV, sv2fv, e2fi):
         for k1 in sorted(fac1):
             fi1 = fac1[k1]
             factors[k1] = add_to_fv(f0 * FV[fi1], FV, e2fi)
-        fi = None
 
     elif not fac1:  # arg * non-arg
         # Record products of non-arg operand with each factor of arg-dependent operand
@@ -203,7 +183,6 @@ def handle_product(si, v, deps, SV_factors, FV, sv2fv, e2fi):
         for k0 in sorted(fac0):
             f0 = FV[fac0[k0]]
             factors[k0] = add_to_fv(f1 * f0, FV, e2fi)
-        fi = None
 
     else:  # arg * arg
         # Record products of each factor of arg-dependent operand
@@ -214,12 +193,11 @@ def handle_product(si, v, deps, SV_factors, FV, sv2fv, e2fi):
                 f1 = FV[fac1[k1]]
                 argkey = tuple(sorted(k0 + k1))  # sort key for canonical representation
                 factors[argkey] = add_to_fv(f0 * f1, FV, e2fi)
-        fi = None
 
-    return fi, factors
+    return factors
 
 
-def handle_division(si, v, deps, SV_factors, FV, sv2fv, e2fi):
+def handle_division(v, si, deps, SV_factors, FV, sv2fv, e2fi):
     fac0 = SV_factors[deps[0]]
     fac1 = SV_factors[deps[1]]
     assert not fac1, "Cannot divide by arguments."
@@ -231,17 +209,16 @@ def handle_division(si, v, deps, SV_factors, FV, sv2fv, e2fi):
         for k0 in sorted(fac0):
             f0 = FV[fac0[k0]]
             factors[k0] = add_to_fv(f0 / f1, FV, e2fi)
-        fi = None
 
     else:  # non-arg / non-arg
         # Record non-argument subexpression
         factors = noargs
-        fi = add_to_fv(v, FV, e2fi)
+        sv2fv[si] = add_to_fv(v, FV, e2fi)
 
-    return fi, factors
+    return factors
 
 
-def handle_conditional(si, v, deps, SV_factors, FV, sv2fv, e2fi):
+def handle_conditional(v, si, deps, SV_factors, FV, sv2fv, e2fi):
     fac0 = SV_factors[deps[0]]
     fac1 = SV_factors[deps[1]]
     fac2 = SV_factors[deps[2]]
@@ -249,7 +226,7 @@ def handle_conditional(si, v, deps, SV_factors, FV, sv2fv, e2fi):
 
     if not (fac1 or fac2):  # non-arg ? non-arg : non-arg
         # Record non-argument subexpression
-        fi = add_to_fv(v, FV, e2fi)
+        sv2fv[si] = add_to_fv(v, FV, e2fi)
         factors = noargs
     else:
         f0 = FV[sv2fv[deps[0]]]
@@ -262,15 +239,12 @@ def handle_conditional(si, v, deps, SV_factors, FV, sv2fv, e2fi):
         assert () not in fac1
         assert () not in fac2
 
-        fi = None
-        factors = {}
-
         z = as_ufl(0.0)
-        zfi = add_to_fv(z, FV, e2fi)  # TODO: flake8 complains zfi is unused, is that ok?
 
         # In general, can decompose like this:
         #    conditional(c, sum_i fi*ui, sum_j fj*uj) -> sum_i conditional(c, fi, 0)*ui + sum_j conditional(c, 0, fj)*uj
         mas = sorted(set(fac1.keys()) | set(fac2.keys()))
+        factors = {}
         for k in mas:
             fi1 = fac1.get(k)
             fi2 = fac2.get(k)
@@ -278,17 +252,17 @@ def handle_conditional(si, v, deps, SV_factors, FV, sv2fv, e2fi):
             f2 = z if fi2 is None else FV[fi2]
             factors[k] = add_to_fv(conditional(f0, f1, f2), FV, e2fi)
 
-    return fi, factors
+    return factors
 
 
-def handle_operator(si, v, deps, SV_factors, FV, sv2fv, e2fi):
+def handle_operator(v, si, deps, SV_factors, FV, sv2fv, e2fi):
     # Error checking
-    if any(SV_factors[deps[j]] for j in range(len(deps))):
+    if any(SV_factors[d] for d in deps):
         error("Assuming that a {0} cannot be applied to arguments. If this is wrong please report a bug.".format(type(v)))
     # Record non-argument subexpression
-    fi = add_to_fv(v, FV, e2fi)
+    sv2fv[si] = add_to_fv(v, FV, e2fi)
     factors = noargs
-    return fi, factors
+    return factors
 
 
 def compute_argument_factorization(SV, SV_deps, SV_targets, rank):
@@ -336,8 +310,15 @@ def compute_argument_factorization(SV, SV_deps, SV_targets, rank):
     FV = []
     e2fi = {}
 
-    # Hack to later build dependencies for the FV entries that change K*K -> K**2
-    two = add_to_fv(as_ufl(2), FV, e2fi)  # FIXME: Might need something more robust here
+    # Adding 0.0 as an expression to fix issue in conditional
+    zero_index = add_to_fv(as_ufl(0.0), FV, e2fi)
+
+    # Adding 1.0 as an expression allows avoiding special representation
+    # of arguments when first visited by representing "v" as "1*v"
+    one_index = add_to_fv(as_ufl(1.0), FV, e2fi)
+
+    # Adding 2 as an expression fixes an issue with FV entries that change K*K -> K**2
+    two_index = add_to_fv(as_ufl(2), FV, e2fi)
 
     # Intermediate factorization for each vertex in SV on the format
     # SV_factors[si] = None # if SV[si] does not depend on arguments
@@ -346,28 +327,37 @@ def compute_argument_factorization(SV, SV_deps, SV_targets, rank):
     #   argkey is a tuple with indices into SV for each of the argument components SV[si] depends on
     # SV_factors[si] = { argkey1: fi1, argkey2: fi2, ... } # if SV[si] is a linear combination of multiple argkey configurations
     SV_factors = numpy.empty(len(SV), dtype=object)
-    sv2fv = numpy.zeros(len(SV), dtype=int)
+    si2fi = numpy.zeros(len(SV), dtype=int)
 
     # Factorize each subexpression in order:
     for si, v in enumerate(SV):
         deps = SV_deps[si]
 
-        # These handlers insert values in sv2fv and SV_factors
+        # These handlers insert values in si2fi and SV_factors
         if not len(deps):
-            fi, factors = handle_modified_terminal(si, v, SV_factors, FV, e2fi, arg_indices, AV, sv2av)
-        elif isinstance(v, Sum):
-            fi, factors = handle_sum(si, v, deps, SV_factors, FV, sv2fv, e2fi)
-        elif isinstance(v, Product):
-            fi, factors = handle_product(si, v, deps, SV_factors, FV, sv2fv, e2fi)
-        elif isinstance(v, Division):
-            fi, factors = handle_division(si, v, deps, SV_factors, FV, sv2fv, e2fi)
-        elif isinstance(v, Conditional):
-            fi, factors = handle_conditional(si, v, deps, SV_factors, FV, sv2fv, e2fi)
-        else:  # All other operators
-            fi, factors = handle_operator(si, v, deps, SV_factors, FV, sv2fv, e2fi)
-
-        if fi is not None:
-            sv2fv[si] = fi
+            if si in arg_indices:
+                # v is a modified Argument
+                factors = { (si,): one_index }
+            else:
+                # v is a modified non-Argument terminal
+                si2fi[si] = add_to_fv(v, FV, e2fi)
+                factors = noargs
+        else:
+            # These quantities could be better input args to handlers:
+            #facs = [SV_factors[d] for d in deps]
+            #fs = [FV[sv2fv[d]] for d in deps]
+            if isinstance(v, Sum):
+                handler = handle_sum
+            elif isinstance(v, Product):
+                handler = handle_product
+            elif isinstance(v, Division):
+                handler = handle_division
+            elif isinstance(v, Conditional):
+                handler = handle_conditional
+            else:  # All other operators
+                handler = handle_operator
+            factors = handler(v, si, deps, SV_factors, FV, si2fi, e2fi)
+
         SV_factors[si] = factors
 
     assert not noargs, "This dict was not supposed to be filled with anything!"
@@ -382,7 +372,7 @@ def compute_argument_factorization(SV, SV_deps, SV_targets, rank):
         if SV_factors[si] == {}:
             if rank == 0:
                 # Functionals and expressions: store as no args * factor
-                factors = { (): sv2fv[si] }
+                factors = { (): si2fi[si] }
             else:
                 # Zero form of arity 1 or higher: make factors empty
                 factors = {}
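
A toy illustration of the sum rule encoded in handle_sum above, with
plain dicts keyed by argument tuples (all names made up for the sketch):

    # f*v + g*v  ->  (f + g)*v: factors sharing an argument key are merged
    fac0 = {("v",): "f"}
    fac1 = {("v",): "g"}
    merged = {}
    for key in sorted(set(fac0) | set(fac1)):
        terms = [fac[key] for fac in (fac0, fac1) if key in fac]
        merged[key] = " + ".join(terms)
    assert merged == {("v",): "f + g"}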
diff --git a/ffc/uflacs/analysis/graph_ssa.py b/ffc/uflacs/analysis/graph_ssa.py
index 36a7cd2..61610f2 100644
--- a/ffc/uflacs/analysis/graph_ssa.py
+++ b/ffc/uflacs/analysis/graph_ssa.py
@@ -102,6 +102,7 @@ def mark_partitions(V, active, dependencies, rank,
         partitions[i] = p
     return partitions
 
+
 """
 def build_factorized_partitions():
     num_points = [3]
@@ -188,6 +189,7 @@ def compute_cache_scores(V, active, dependencies, inverse_dependencies, partitio
         score[i] = s
     return score
 
+
 import heapq
 
 
diff --git a/ffc/uflacs/backends/ffc/access.py b/ffc/uflacs/backends/ffc/access.py
index 4fa8262..1bb0e37 100644
--- a/ffc/uflacs/backends/ffc/access.py
+++ b/ffc/uflacs/backends/ffc/access.py
@@ -20,11 +20,11 @@
 
 from ufl.corealg.multifunction import MultiFunction
 from ufl.permutation import build_component_numbering
+from ufl.measure import custom_integral_types
 
-from ffc.log import error, warning
+from ffc.log import error, warning, debug
 
 from ffc.uflacs.backends.ffc.symbols import FFCBackendSymbols
-from ffc.uflacs.backends.ffc.common import physical_quadrature_integral_types
 
 
 class FFCBackendAccess(MultiFunction):
@@ -34,7 +34,6 @@ class FFCBackendAccess(MultiFunction):
         MultiFunction.__init__(self)
 
         # Store ir and parameters
-        self.ir = ir
         self.entitytype = ir["entitytype"]
         self.integral_type = ir["integral_type"]
         self.language = language
@@ -72,66 +71,29 @@ class FFCBackendAccess(MultiFunction):
         return L.LiteralFloat(float(e))
 
 
-    def argument(self, e, mt, tabledata, num_points):
-        L = self.language
-        # Expecting only local derivatives and values here
-        assert not mt.global_derivatives
-        # assert mt.global_component is None
-
-        # No need to store basis function value in its own variable, just get table value directly
-        #uname, begin, end, ttype = tabledata
-        uname, begin, end = tabledata
-        table_types = self.ir["expr_irs"][num_points]["table_types"]
-        ttype = table_types[uname]
-
-        if ttype == "zeros":
-            error("Not expecting zero arguments to get this far.")
-            return L.LiteralFloat(0.0)
-        elif ttype == "ones":
-            warning("Should simplify ones arguments before getting this far.")
-            return L.LiteralFloat(1.0)
-
-        if ttype in ("uniform", "fixed"):
-            entity = 0
-        else:
-            entity = self.symbols.entity(self.entitytype, mt.restriction)
+    #def quadrature_weight(self, e, mt, tabledata, num_points):
+    #    "Quadrature weights are precomputed and need no code."
+    #    return []
 
-        if ttype in ("piecewise", "fixed"):
-            iq = 0
-        else:
-            iq = self.symbols.quadrature_loop_index(num_points)
 
-        if ttype == "quadrature":
-            warning("Should simplify quadrature element arguments before getting this far.")
-            idof = iq
-        else:
-            idof = self.symbols.argument_loop_index(mt.terminal.number())
+    def coefficient(self, e, mt, tabledata, num_points):
+        ttype = tabledata.ttype
 
-        uname = L.Symbol(uname)
-        return uname[entity][iq][idof - begin]
+        assert ttype != "zeros"
 
+        begin, end = tabledata.dofrange
 
-    def coefficient(self, e, mt, tabledata, num_points):
-        # TODO: Passing type along with tabledata would make a lot of code cleaner
-        #uname, begin, end, ttype = tabledata
-        uname, begin, end = tabledata
-        table_types = self.ir["expr_irs"][num_points]["table_types"]
-        ttype = table_types[uname]
-
-        if ttype == "zeros":
-            # FIXME: Remove at earlier stage so dependent code can also be removed
-            warning("Not expecting zero coefficients to get this far.")
-            L = self.language
-            return L.LiteralFloat(0.0)
-        elif ttype == "ones" and (end - begin) == 1:
+        if ttype == "ones" and (end - begin) == 1:
             # f = 1.0 * f_{begin}, just return direct reference to dof array at dof begin
             # (if mt is restricted, begin contains cell offset)
             idof = begin
             return self.symbols.coefficient_dof_access(mt.terminal, idof)
         elif ttype == "quadrature":
+            # Dofmap should be contiguous in this case
+            assert len(tabledata.dofmap) == end - begin
             # f(x_q) = sum_i f_i * delta_iq = f_q, just return direct
             # reference to dof array at quadrature point index + begin
-            iq = self.symbols.quadrature_loop_index(num_points)
+            iq = self.symbols.quadrature_loop_index()
             idof = begin + iq
             return self.symbols.coefficient_dof_access(mt.terminal, idof)
         else:
@@ -139,12 +101,6 @@ class FFCBackendAccess(MultiFunction):
             return self.symbols.coefficient_value(mt)  #, num_points)
 
 
-    def quadrature_weight(self, e, mt, tabledata, num_points):
-        weight = self.symbols.weights_array(num_points)
-        iq = self.symbols.quadrature_loop_index(num_points)
-        return weight[iq]
-
-
     def spatial_coordinate(self, e, mt, tabledata, num_points):
         #L = self.language
         if mt.global_derivatives:
@@ -152,15 +108,13 @@ class FFCBackendAccess(MultiFunction):
         if mt.averaged:
             error("Not expecting average of SpatialCoordinates.")
 
-        if self.integral_type in physical_quadrature_integral_types:
-            # FIXME: Jacobian may need adjustment for physical_quadrature_integral_types
+        if self.integral_type in custom_integral_types:
             if mt.local_derivatives:
                 error("FIXME: Jacobian in custom integrals is not implemented.")
 
-            # Physical coordinates are available in given variables
-            assert num_points is None
-            x = self.symbols.points_array(num_points)
-            iq = self.symbols.quadrature_loop_index(num_points)
+            # Access predefined quadrature points table
+            x = self.symbols.custom_points_table()
+            iq = self.symbols.quadrature_loop_index()
             gdim, = mt.terminal.ufl_shape
             if gdim == 1:
                 index = iq
@@ -182,9 +136,10 @@ class FFCBackendAccess(MultiFunction):
             error("Not expecting average of CellCoordinate.")
 
         if self.integral_type == "cell" and not mt.restriction:
-            X = self.symbols.points_array(num_points)
+            # Access predefined quadrature points table
+            X = self.symbols.points_table(num_points)
             tdim, = mt.terminal.ufl_shape
-            iq = self.symbols.quadrature_loop_index(num_points)
+            iq = self.symbols.quadrature_loop_index()
             if num_points == 1:
                 index = mt.flat_component
             elif tdim == 1:
@@ -216,8 +171,8 @@ class FFCBackendAccess(MultiFunction):
                 # 0D vertex coordinate
                 warning("Vertex coordinate is always 0, should get rid of this in ufl geometry lowering.")
                 return L.LiteralFloat(0.0)
-            Xf = self.points_array(num_points)
-            iq = self.symbols.quadrature_loop_index(num_points)
+            Xf = self.symbols.points_table(num_points)
+            iq = self.symbols.quadrature_loop_index()
             assert 0 <= mt.flat_component < (tdim-1)
             if num_points == 1:
                 index = mt.flat_component
diff --git a/ffc/uflacs/backends/ffc/common.py b/ffc/uflacs/backends/ffc/common.py
index 3013c68..4865363 100644
--- a/ffc/uflacs/backends/ffc/common.py
+++ b/ffc/uflacs/backends/ffc/common.py
@@ -22,9 +22,6 @@
 from ffc.log import error
 
 
-physical_quadrature_integral_types = ("custom", "cutcell", "interface", "overlap")
-
-
 # TODO: Move somewhere else
 def num_coordinate_component_dofs(coordinate_element):
     """Get the number of dofs for a coordinate component for this degree.
diff --git a/ffc/uflacs/backends/ffc/definitions.py b/ffc/uflacs/backends/ffc/definitions.py
index 983bac5..7c03faa 100644
--- a/ffc/uflacs/backends/ffc/definitions.py
+++ b/ffc/uflacs/backends/ffc/definitions.py
@@ -19,11 +19,11 @@
 """FFC/UFC specific variable definitions."""
 
 from ufl.corealg.multifunction import MultiFunction
+from ufl.measure import custom_integral_types
 
-from ffc.log import error, warning
+from ffc.log import debug, error, warning
 
 from ffc.uflacs.backends.ffc.symbols import FFCBackendSymbols
-from ffc.uflacs.backends.ffc.common import physical_quadrature_integral_types
 from ffc.uflacs.backends.ffc.common import num_coordinate_component_dofs
 
 
@@ -33,7 +33,6 @@ class FFCBackendDefinitions(MultiFunction):
         MultiFunction.__init__(self)
 
         # Store ir and parameters
-        self.ir = ir
         self.integral_type = ir["integral_type"]
         self.entitytype = ir["entitytype"]
         self.language = language
@@ -51,9 +50,9 @@ class FFCBackendDefinitions(MultiFunction):
         error("Unhandled type {0}".format(type(t)))
 
 
-    def quadrature_weight(self, e, mt, tabledata, num_points, access):
-        "Quadrature weights are precomputed and need no code."
-        return []
+    #def quadrature_weight(self, e, mt, tabledata, num_points, access):
+    #    "Quadrature weights are precomputed and need no code."
+    #    return []
 
 
     def constant_value(self, e, mt, tabledata, num_points, access):
@@ -70,18 +69,13 @@ class FFCBackendDefinitions(MultiFunction):
         "Return definition code for coefficients."
         L = self.language
 
-        # No need to store basis function value in its own variable,
-        # just get table value directly
-        #uname, begin, end, ttype = tabledata
-        uname, begin, end = tabledata
-        table_types = self.ir["expr_irs"][num_points]["table_types"]
-        ttype = table_types[uname]
+        ttype = tabledata.ttype
+        begin, end = tabledata.dofrange
 
         #fe_classname = ir["classnames"]["finite_element"][t.ufl_element()]
 
-        # FIXME: remove at earlier stage so dependent code can also be removed
         if ttype == "zeros":
-            warning("Not expecting zero coefficients to get this far.")
+            debug("Not expecting zero coefficients to get this far.")
             return []
 
         # For a constant coefficient we reference the dofs directly, so no definition needed
@@ -94,35 +88,29 @@ class FFCBackendDefinitions(MultiFunction):
 
         assert begin < end
 
-        # Entity number
-        if ttype in ("uniform", "fixed"):
-            entity = 0
-        else:
-            entity = self.symbols.entity(self.entitytype, mt.restriction)
-
-        # This check covers "piecewise constant over points on entity"
-        if ttype in ("piecewise", "fixed"):
-            iq = 0
-        else:
-            iq = self.symbols.quadrature_loop_index(num_points)
-
-        idof = self.symbols.coefficient_dof_sum_index()
-        dof_access = self.symbols.coefficient_dof_access(mt.terminal, idof)
-
-        if ttype == "ones":
-            # Don't think this can actually happen
-            table_access = L.LiteralFloat(1.0)
+        # Get access to element table
+        FE = self.symbols.element_table(tabledata, self.entitytype, mt.restriction)
+
+        unroll = len(tabledata.dofmap) != end - begin
+        #unroll = True
+        if unroll:
+            # TODO: Could also use a generated constant dofmap here like in block code
+            # Unrolled loop to accumulate linear combination of dofs and tables
+            values = [self.symbols.coefficient_dof_access(mt.terminal, idof) * FE[i]
+                      for i, idof in enumerate(tabledata.dofmap)]
+            value = L.Sum(values)
+            code = [
+                L.VariableDecl("const double", access, value)
+                ]
         else:
-            uname = L.Symbol(uname)
-            table_access = uname[entity][iq][idof - begin]
-
-        # Loop to accumulate linear combination of dofs and tables
-        code = [
-            L.VariableDecl("double", access, 0.0),
-            L.ForRange(idof, begin, end,
-                       body=[L.AssignAdd(access, dof_access * table_access)])
-            ]
-
+            # Loop to accumulate linear combination of dofs and tables
+            ic = self.symbols.coefficient_dof_sum_index()
+            dof_access = self.symbols.coefficient_dof_access(mt.terminal, ic + begin)
+            code = [
+                L.VariableDecl("double", access, 0.0),
+                L.ForRange(ic, 0, end - begin,
+                           body=[L.AssignAdd(access, dof_access * FE[ic])])
+                ]
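+            # Illustrative only (symbol names assumed): the loop branch above
+            # emits C roughly like
+            #   double w0 = 0.0;
+            #   for (int ic = 0; ic < end - begin; ++ic)
+            #       w0 += w[c][begin + ic] * FE[ic];
+            # while the unrolled branch emits one const declaration summing
+            # w[c][idof] * FE[i] over the dofmap entries.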
         return code
 
 
@@ -142,71 +130,64 @@ class FFCBackendDefinitions(MultiFunction):
         # this component as linear combination of coordinate_dofs "dofs" and table
 
         # Find table name and dof range it corresponds to
-        #uname, begin, end, ttype = tabledata
-        uname, begin, end = tabledata
-        table_types = self.ir["expr_irs"][num_points]["table_types"]
-        ttype = table_types[uname]
+        ttype = tabledata.ttype
+        begin, end = tabledata.dofrange
 
         assert end - begin <= num_scalar_dofs
         assert ttype != "zeros"
+        assert ttype != "quadrature"
         #xfe_classname = ir["classnames"]["finite_element"][coordinate_element]
         #sfe_classname = ir["classnames"]["finite_element"][coordinate_element.sub_elements()[0]]
 
-        # Entity number
-        if ttype in ("uniform", "fixed"):
-            entity = 0
-        else:
-            entity = self.symbols.entity(self.entitytype, mt.restriction)
+        # Get access to element table
+        FE = self.symbols.element_table(tabledata, self.entitytype, mt.restriction)
 
-        # This check covers "piecewise constant over points on entity"
-        if ttype in ("piecewise", "fixed"):
-            iq = 0
-        else:
-            iq = self.symbols.quadrature_loop_index(num_points)
-
-        assert ttype != "quadrature"
-            
-        # Make indexable symbol
-        uname = L.Symbol(uname)
+        inline = True
 
         if ttype == "zeros":
+            # Not sure if this will ever happen
+            debug("Not expecting zeros for %s." % (e._ufl_class_.__name__,))
             code = [
                 L.VariableDecl("const double", access, L.LiteralFloat(0.0))
                 ]
         elif ttype == "ones":
-            # Not sure if this ever happens
+            # Not sure if this will ever happen
+            debug("Not expecting ones for %s." % (e._ufl_class_.__name__,))
             # Inlined version (we know this is bounded by a small number)
             dof_access = self.symbols.domain_dofs_access(gdim, num_scalar_dofs,
                                                          mt.restriction,
                                                          self.interleaved_components)
-            value = L.Sum([dof_access[idof] for idof in range(begin, end)])
+            values = [dof_access[idof] for idof in tabledata.dofmap]
+            value = L.Sum(values)
             code = [
                 L.VariableDecl("const double", access, value)
                 ]
-        elif True:
+        elif inline:
             # Inlined version (we know this is bounded by a small number)
             dof_access = self.symbols.domain_dofs_access(gdim, num_scalar_dofs,
                                                          mt.restriction,
                                                          self.interleaved_components)
             # Inlined loop to accumulate linear combination of dofs and tables
-            value = L.Sum([dof_access[idof] * uname[entity][iq][idof - begin]
-                           for idof in range(begin, end)])
+            value = L.Sum([dof_access[idof] * FE[i]
+                           for i, idof in enumerate(tabledata.dofmap)])
             code = [
                 L.VariableDecl("const double", access, value)
                 ]
         else:  # TODO: Make an option to test this version for performance
+            # Assuming contiguous dofmap here
+            assert len(tabledata.dofmap) == end - begin
+
             # Generated loop version:
-            coefficient_dof = self.symbols.coefficient_dof_sum_index()
-            dof_access = self.symbols.domain_dof_access(coefficient_dof, mt.flat_component,
+            ic = self.symbols.coefficient_dof_sum_index()
+            dof_access = self.symbols.domain_dof_access(ic + begin, mt.flat_component,
                                                         gdim, num_scalar_dofs,
                                                         mt.restriction, self.interleaved_components)
-            table_access = uname[entity][iq][coefficient_dof]
 
             # Loop to accumulate linear combination of dofs and tables
             code = [
                 L.VariableDecl("double", access, 0.0),
-                L.ForRange(coefficient_dof, begin, end,
-                           body=[L.AssignAdd(access, dof_access * table_access)])
+                L.ForRange(ic, 0, end - begin,
+                           body=[L.AssignAdd(access, dof_access * FE[ic])])
                 ]
 
         return code
@@ -224,8 +205,8 @@ class FFCBackendDefinitions(MultiFunction):
         If reference facet coordinates are given:
           x = sum_k xdof_k xphi_k(Xf)
         """
-        if self.integral_type in physical_quadrature_integral_types:
-            # FIXME: Jacobian may need adjustment for physical_quadrature_integral_types
+        if self.integral_type in custom_integral_types:
+            # FIXME: Jacobian may need adjustment for custom_integral_types
             if mt.local_derivatives:
                 error("FIXME: Jacobian in custom integrals is not implemented.")
             return []
@@ -260,7 +241,7 @@ class FFCBackendDefinitions(MultiFunction):
 
         J = sum_k xdof_k grad_X xphi_k(X)
         """
-        # TODO: Jacobian may need adjustment for physical_quadrature_integral_types
+        # TODO: Jacobian may need adjustment for custom_integral_types
         return self._define_coordinate_dofs_lincomb(e, mt, tabledata, num_points, access)
 
 
diff --git a/ffc/uflacs/backends/ffc/symbols.py b/ffc/uflacs/backends/ffc/symbols.py
index d748ea2..fa87697 100644
--- a/ffc/uflacs/backends/ffc/symbols.py
+++ b/ffc/uflacs/backends/ffc/symbols.py
@@ -70,10 +70,12 @@ class FFCBackendSymbols(object):
         self.restriction_postfix = { r: ufc_restriction_postfix(r)
                                      for r in ("+", "-", None) }
 
+
     def element_tensor(self):
         "Symbol for the element tensor itself."
         return self.S("A")
 
+
     def entity(self, entitytype, restriction):
         "Entity index for lookup in element tables."
         if entitytype == "cell":
@@ -86,86 +88,127 @@ class FFCBackendSymbols(object):
         else:
             error("Unknown entitytype {}".format(entitytype))
 
+
     def cell_orientation_argument(self, restriction):
         "Cell orientation argument in ufc. Not same as cell orientation in generated code."
         return self.S("cell_orientation" + ufc_restriction_postfix(restriction))
 
+
     def cell_orientation_internal(self, restriction):
         "Internal value for cell orientation in generated code."
         return self.S("co" + ufc_restriction_postfix(restriction))
 
-    def num_quadrature_points(self, num_points):
-        if num_points is None:
-            return self.S("num_quadrature_points")
-        else:
-            return self.L.LiteralInt(num_points)
 
-    def weights_array(self, num_points):
-        if num_points is None:
-            return self.S("quadrature_weights")
-        else:
-            return self.S("weights%d" % (num_points,))
+    def argument_loop_index(self, iarg):
+        "Loop index for argument #iarg."
+        indices = ["i", "j", "k", "l"]
+        return self.S(indices[iarg])
 
-    def points_array(self, num_points):
-        # Note: Points array refers to points on the integration cell
-        if num_points is None:
-            return self.S("quadrature_points")
-        else:
-            return self.S("points%d" % (num_points,))
 
-    def quadrature_loop_index(self, num_points):
+    def coefficient_dof_sum_index(self):
+        """Index for loops over coefficient dofs, assumed to never be used in two nested loops."""
+        return self.S("ic")
+
+
+    def quadrature_loop_index(self):
         """Reusing a single index name for all quadrature loops,
         assumed not to be nested."""
-        if num_points == 1:
-            return self.L.LiteralInt(0)
-        elif num_points is None:
-            return self.S("iq")
-        else:
-            return self.S("iq%d" % (num_points,))
+        return self.S("iq")
 
-    def argument_loop_index(self, iarg):
-        "Loop index for argument #iarg."
-        return self.S("ia%d" % (iarg,))
 
-    def coefficient_dof_sum_index(self):
-        """Reusing a single index name for all coefficient dof*basis sums,
-        assumed to always be the innermost loop."""
-        return self.S("ic")
+    def num_custom_quadrature_points(self):
+        "Number of quadrature points, argument to custom integrals."
+        return self.S("num_quadrature_points")
+
+
+    def custom_quadrature_weights(self):
+        "Quadrature weights including cell measure scaling, argument to custom integrals."
+        return self.S("quadrature_weights")
+
+
+    def custom_quadrature_points(self):
+        "Physical quadrature points, argument to custom integrals."
+        return self.S("quadrature_points")
+
+
+    def custom_weights_table(self):
+        "Table for chunk of custom quadrature weights (including cell measure scaling)."
+        return self.S("weights_chunk")
+
+
+    def custom_points_table(self):
+        "Table for chunk of custom quadrature points (physical coordinates)."
+        return self.S("points_chunk")
+
+
+    def weights_table(self, num_points):
+        "Table of quadrature weights."
+        return self.S("weights%d" % (num_points,))
+
+
+    def points_table(self, num_points):
+        "Table of quadrature points (points on the reference integration entity)."
+        return self.S("points%d" % (num_points,))
+
 
     def x_component(self, mt):
         "Physical coordinate component."
         return self.S(format_mt_name("x", mt))
 
+
+    def X_component(self, mt):
+        "Reference coordinate component."
+        return self.S(format_mt_name("X", mt))
+
+
     def J_component(self, mt):
         "Jacobian component."
+        # FIXME: Add domain number!
         return self.S(format_mt_name("J", mt))
 
+
     def domain_dof_access(self, dof, component, gdim, num_scalar_dofs,
                           restriction, interleaved_components):
-        # TODO: Add domain number?
+        # FIXME: Add domain number or offset!
         vc = self.S("coordinate_dofs" + ufc_restriction_postfix(restriction))
         if interleaved_components:
             return vc[gdim*dof + component]
         else:
             return vc[num_scalar_dofs*component + dof]
 
+
     def domain_dofs_access(self, gdim, num_scalar_dofs, restriction,
                            interleaved_components):
-        # TODO: Add domain number?
+        # FIXME: Add domain number or offset!
         return [self.domain_dof_access(dof, component, gdim, num_scalar_dofs,
                                        restriction, interleaved_components)
                 for component in range(gdim)
                 for dof in range(num_scalar_dofs)]
 
+
     def coefficient_dof_access(self, coefficient, dof_number):
         # TODO: Add domain number?
         c = self.coefficient_numbering[coefficient]
         w = self.S("w")
         return w[c, dof_number]
 
-    def coefficient_value(self, mt):  #, num_points):
+
+    def coefficient_value(self, mt):
         "Symbol for variable holding value or derivative component of coefficient."
         c = self.coefficient_numbering[mt.terminal]
         return self.S(format_mt_name("w%d" % (c,), mt))
-        # TODO: Should we include num_points here? Not sure if there is a need.
-        #return self.S(format_mt_name("w%d_%d" % (c, num_points), mt))
+
+
+    def element_table(self, tabledata, entitytype, restriction):
+        if tabledata.is_uniform:
+            entity = 0
+        else:
+            entity = self.entity(entitytype, restriction)
+
+        if tabledata.is_piecewise:
+            iq = 0
+        else:
+            iq = self.quadrature_loop_index()
+
+        # Return direct access to element table
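+        # (Illustrative, table name assumed: a varying, non-uniform table
+        # yields FE0[entity][iq]; a piecewise, uniform one collapses to
+        # FE0[0][0].)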
+        return self.S(tabledata.name)[entity][iq]
diff --git a/ffc/uflacs/build_uflacs_ir.py b/ffc/uflacs/build_uflacs_ir.py
new file mode 100644
index 0000000..2f8897f
--- /dev/null
+++ b/ffc/uflacs/build_uflacs_ir.py
@@ -0,0 +1,962 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
+
+"""Main algorithm for building the uflacs intermediate representation."""
+
+import numpy
+from collections import defaultdict, namedtuple
+from itertools import chain
+import itertools
+
+from ufl import product, as_ufl
+from ufl.log import error, warning, debug
+from ufl.checks import is_cellwise_constant
+from ufl.classes import CellCoordinate, FacetCoordinate, QuadratureWeight
+from ufl.measure import custom_integral_types, point_integral_types, facet_integral_types
+from ufl.algorithms.analysis import has_type
+
+from ffc.uflacs.analysis.balancing import balance_modifiers
+from ffc.uflacs.analysis.modified_terminals import is_modified_terminal, analyse_modified_terminal
+from ffc.uflacs.analysis.graph import build_graph
+from ffc.uflacs.analysis.graph_vertices import build_scalar_graph_vertices
+from ffc.uflacs.analysis.graph_rebuild import rebuild_with_scalar_subexpressions
+from ffc.uflacs.analysis.dependencies import compute_dependencies, mark_active, mark_image
+from ffc.uflacs.analysis.graph_ssa import compute_dependency_count, invert_dependencies
+#from ffc.uflacs.analysis.graph_ssa import default_cache_score_policy, compute_cache_scores, allocate_registers
+from ffc.uflacs.analysis.factorization import compute_argument_factorization
+from ffc.uflacs.elementtables import build_optimized_tables, piecewise_ttypes, uniform_ttypes, clamp_table_small_numbers
+
+
+# Some small internal structs (namedtuples); a big improvement in
+# readability and maintainability over plain tuples.
+
+ma_data_t = namedtuple(
+    "ma_data_t",
+    ["ma_index", "tabledata"]
+    )
+
+common_block_data_fields = [
+    "block_mode",           # block mode name: "safe" | "full" | "preintegrated" | "premultiplied"
+    "ttypes",               # list of table types for each block rank
+    "factor_index",         # int: index of factor in vertex array
+    "factor_is_piecewise",  # bool: factor is found in piecewise vertex array instead of quadloop specific vertex array
+    "unames",               # list of unique FE table names for each block rank
+    "restrictions",         # restriction "+" | "-" | None for each block rank
+    "transposed",           # block is the transpose of another
+    ]
+common_block_data_t = namedtuple(
+    "common_block_data_t",
+    common_block_data_fields
+    )
+
+
+def get_common_block_data(blockdata):
+    return common_block_data_t(*blockdata[:len(common_block_data_fields)])
+
+
+preintegrated_block_data_t = namedtuple(
+    "preintegrated_block_data_t",
+    common_block_data_fields + ["is_uniform", "name"]
+    )
+
+premultiplied_block_data_t = namedtuple(
+    "premultiplied_block_data_t",
+    common_block_data_fields + ["is_uniform", "name"]
+    )
+
+partial_block_data_t = namedtuple(
+    "partial_block_data_t",
+    common_block_data_fields + ["ma_data", "piecewise_ma_index"]
+    )
+
+full_block_data_t = namedtuple(
+    "full_block_data_t",
+    common_block_data_fields + ["ma_data"]
+    )
+
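+# Illustrative only (field values assumed): a rank-2 "full" block for a
+# test/trial pair could look like
+#   full_block_data_t(block_mode="full", ttypes=("varying", "varying"),
+#                     factor_index=7, factor_is_piecewise=False,
+#                     unames=("FE0", "FE1"), restrictions=(None, None),
+#                     transposed=False,
+#                     ma_data=(ma_data_t(0, tr_u), ma_data_t(1, tr_v)))
+# where tr_u and tr_v are the unique table references of the two arguments.
+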
+
+def multiply_block_interior_facets(point_index, unames, ttypes, unique_tables, unique_table_num_dofs):
+    rank = len(unames)
+    tables = [unique_tables.get(name) for name in unames]
+    num_dofs = tuple(unique_table_num_dofs[name] for name in unames)
+
+    num_entities = max([1] + [tbl.shape[0] for tbl in tables if tbl is not None])
+    ptable = numpy.zeros((num_entities,)*rank + num_dofs)
+    for facets in itertools.product(*[range(num_entities)]*rank):
+        vectors = []
+        for i, tbl in enumerate(tables):
+            if tbl is None:
+                assert ttypes[i] == "ones"
+                vectors.append(numpy.ones((num_dofs[i],)))
+            else:
+                # Some tables are compacted along entities or points
+                e = 0 if tbl.shape[0] == 1 else facets[i]
+                q = 0 if tbl.shape[1] == 1 else point_index
+                vectors.append(tbl[e, q, :])
+        if rank > 1:
+            assert rank == 2
+            ptable[facets[0], facets[1], ...] = numpy.outer(*vectors)
+        elif rank == 1:
+            ptable[facets[0], :] = vectors[0]
+        else:
+            error("Nothing to multiply!")
+
+    return ptable
+
+
+def multiply_block(point_index, unames, ttypes, unique_tables, unique_table_num_dofs):
+    rank = len(unames)
+    tables = [unique_tables.get(name) for name in unames]
+    num_dofs = tuple(unique_table_num_dofs[name] for name in unames)
+
+    num_entities = max([1] + [tbl.shape[0] for tbl in tables if tbl is not None])
+    ptable = numpy.zeros((num_entities,) + num_dofs)
+    for entity in range(num_entities):
+        vectors = []
+        for i, tbl in enumerate(tables):
+            if tbl is None:
+                assert ttypes[i] == "ones"
+                vectors.append(numpy.ones((num_dofs[i],)))
+            else:
+                # Some tables are compacted along entities or points
+                e = 0 if tbl.shape[0] == 1 else entity
+                q = 0 if tbl.shape[1] == 1 else point_index
+                vectors.append(tbl[e, q, :])
+        if rank > 1:
+            ptable[entity, ...] = numpy.outer(*vectors)
+        elif rank == 1:
+            ptable[entity, :] = vectors[0]
+        else:
+            error("Nothing to multiply!")
+
+    return ptable
+
+
+def integrate_block(weights, unames, ttypes, unique_tables, unique_table_num_dofs):
+    rank = len(unames)
+    tables = [unique_tables.get(name) for name in unames]
+    num_dofs = tuple(unique_table_num_dofs[name] for name in unames)
+
+    num_entities = max([1] + [tbl.shape[0] for tbl in tables if tbl is not None])
+    ptable = numpy.zeros((num_entities,) + num_dofs)
+    for iq, w in enumerate(weights):
+        ptable[...] += w * multiply_block(iq, unames, ttypes, unique_tables, unique_table_num_dofs)
+
+    return ptable
+
+
+def integrate_block_interior_facets(weights, unames, ttypes, unique_tables, unique_table_num_dofs):
+    rank = len(unames)
+    tables = [unique_tables.get(name) for name in unames]
+    num_dofs = tuple(unique_table_num_dofs[name] for name in unames)
+
+    num_entities = max([1] + [tbl.shape[0] for tbl in tables if tbl is not None])
+    ptable = numpy.zeros((num_entities,)*rank + num_dofs)
+    for iq, w in enumerate(weights):
+        mtable = multiply_block_interior_facets(iq, unames, ttypes, unique_tables, unique_table_num_dofs)
+        ptable[...] += w * mtable
+
+    return ptable
+
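+# Illustrative sketch (toy values assumed): integrate_block implements the
+# preintegration math P[e, i, j] = sum_q w[q] * u[e, q, i] * v[e, q, j]:
+#
+#   u = numpy.array([[[1.0, 0.0], [0.5, 0.5]]])  # (entities, points, dofs)
+#   v = numpy.array([[[1.0, 0.0], [0.0, 1.0]]])
+#   w = [0.5, 0.5]
+#   P = sum(wq * numpy.outer(u[0, q], v[0, q]) for q, wq in enumerate(w))
+#   # P == [[0.5, 0.25], [0.0, 0.25]]
+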
+
+def empty_expr_ir():
+    expr_ir = {}
+    expr_ir["V"] = []
+    expr_ir["V_active"] = []
+    expr_ir["V_targets"] = []
+    expr_ir["V_mts"] = []
+    expr_ir["mt_tabledata"] = {}
+    expr_ir["modified_arguments"] = []
+    expr_ir["preintegrated_blocks"] = {}
+    expr_ir["premultiplied_blocks"] = {}
+    expr_ir["preintegrated_contributions"] = defaultdict(list)
+    expr_ir["block_contributions"] = defaultdict(list)
+    return expr_ir
+
+
+def uflacs_default_parameters(optimize):
+    """Default parameters for tuning of uflacs code generation.
+
+    These are considered experimental and may change
+    without deprecation mechanism at any time.
+    """
+    p = {
+        # Relative precision to use when comparing finite element
+        # table values for table reuse
+        "table_rtol": 1e-6,
+
+        # Absolute precision to use when comparing finite element
+        # table values for table reuse and dropping of table zeros
+        "table_atol": 1e-9,
+
+        # Point chunk size for custom integrals
+        "chunk_size": 8,
+
+        # Optimization parameters used in representation building
+        # TODO: The names of these parameters can be a bit misleading
+        "enable_preintegration": False,
+        "enable_premultiplication": False,
+        "enable_sum_factorization": False,
+        "enable_block_transpose_reuse": False,
+        "enable_table_zero_compression": False,
+
+        # Code generation parameters
+        "vectorize": False,
+        "alignas": 0,
+        "padlen": 1,
+        "use_symbol_array": True,
+        "tensor_init_mode": "upfront",   # interleaved | direct | upfront
+    }
+    if optimize:
+        # Override defaults if optimization is turned on
+        p.update({
+            # Optimization parameters used in representation building
+            # TODO: The names of these parameters can be a bit misleading
+            "enable_preintegration": True,
+            "enable_premultiplication": False,
+            "enable_sum_factorization": True,
+            "enable_block_transpose_reuse": True,
+            "enable_table_zero_compression": True,
+
+            # Code generation parameters
+            "vectorize": False,
+            "alignas": 32,
+            "padlen": 1,
+            "use_symbol_array": True,
+            "tensor_init_mode": "interleaved",   # interleaved | direct | upfront
+        })
+    return p
+
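+# Illustrative usage (assumed; FFC itself goes through
+# parse_uflacs_optimization_parameters below):
+#
+#   p = uflacs_default_parameters(optimize=True)
+#   assert p["enable_preintegration"] and p["alignas"] == 32
+#   assert uflacs_default_parameters(optimize=False)["alignas"] == 0
+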
+
+def parse_uflacs_optimization_parameters(parameters, integral_type):
+    """Following model from quadrature representation, extracting
+    uflacs specific parameters from the global parameters dict."""
+
+    # Get default parameters
+    p = uflacs_default_parameters(parameters["optimize"])
+
+    # Override with uflacs specific parameters if
+    # present in given global parameters dict
+    for key in p:
+        if key in parameters:
+            value = parameters[key]
+            # Casting done here because main doesn't know about these parameters
+            if isinstance(p[key], int):
+                value = int(value)
+            elif isinstance(p[key], float):
+                value = float(value)
+            p[key] = value
+
+    # Conditionally disable some optimizations based on integral type,
+    # i.e. these options are not valid for certain integral types
+    skip_preintegrated = point_integral_types + custom_integral_types
+    if integral_type in skip_preintegrated:
+        p["enable_preintegration"] = False
+
+    skip_premultiplied = point_integral_types + custom_integral_types
+    if integral_type in skip_premultiplied:
+        p["enable_premultiplication"] = False
+
+    return p
+
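+# Illustrative sketch (parameter values assumed): string values from the
+# global parameters dict are cast to the type of the corresponding default:
+#
+#   p = parse_uflacs_optimization_parameters(
+#       {"optimize": False, "chunk_size": "16"}, "cell")
+#   assert p["chunk_size"] == 16          # cast from the string "16"
+#   assert p["enable_preintegration"] is False
+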
+
+def build_uflacs_ir(cell, integral_type, entitytype,
+                    integrands, tensor_shape,
+                    coefficient_numbering,
+                    quadrature_rules, parameters):
+    # The intermediate representation dict we're building and returning here
+    ir = {}
+
+    # Extract uflacs specific optimization and code generation parameters
+    p = parse_uflacs_optimization_parameters(parameters, integral_type)
+
+    # Pass on parameters for consumption in code generation
+    ir["params"] = p
+
+    # { ufl coefficient: count }
+    ir["coefficient_numbering"] = coefficient_numbering
+
+    # Shared unique tables for all quadrature loops
+    ir["unique_tables"] = {}
+    ir["unique_table_types"] = {}
+
+    # Shared piecewise expr_ir for all quadrature loops
+    ir["piecewise_ir"] = empty_expr_ir()
+
+    # { num_points: expr_ir for one integrand }
+    ir["varying_irs"] = {}
+
+    # Temporary data structures to build shared piecewise data
+    pe2i = {}
+    piecewise_modified_argument_indices = {}
+
+    # Whether we expect the quadrature weight to be applied or not
+    # (in some cases it's just set to 1 in ufl integral scaling)
+    tdim = cell.topological_dimension()
+    expect_weight = (
+        integral_type not in ("expression",) + point_integral_types
+        and (entitytype == "cell"
+            or (entitytype == "facet" and tdim > 1)
+            or (integral_type in custom_integral_types)
+            )
+        )
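+    # (Illustrative: a cell integral on any mesh and a facet integral with
+    # tdim > 1 expect the weight to be applied; point integrals and
+    # "expression" do not.)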
+
+    if integral_type == "expression":
+        # TODO: Figure out how to get non-integrand expressions in here; this is just a draft:
+        # Analyse all expressions in one list
+        assert isinstance(integrands, (tuple, list))
+        all_num_points = [None]
+        cases = [(None, integrands)]
+    else:
+        # Analyse each num_points/integrand separately
+        assert isinstance(integrands, dict)
+        all_num_points = sorted(integrands.keys())
+        cases = [(num_points, [integrands[num_points]])
+                 for num_points in all_num_points]
+    ir["all_num_points"] = all_num_points
+
+    for num_points, expressions in cases:
+        # Rebalance order of nested terminal modifiers
+        expressions = [balance_modifiers(expr) for expr in expressions]
+
+        # Build initial scalar list-based graph representation
+        V, V_deps, V_targets = build_scalar_graph(expressions)
+
+        # Build terminal_data from V here before factorization.
+        # Then we can use it to derive table properties for all modified terminals,
+        # and then use that to rebuild the scalar graph more efficiently before
+        # argument factorization. We can build terminal_data again after factorization
+        # if that's necessary.
+        initial_terminal_indices = [i for i, v in enumerate(V)
+                                    if is_modified_terminal(v)]
+        initial_terminal_data = [analyse_modified_terminal(V[i])
+                                 for i in initial_terminal_indices]
+        unique_tables, unique_table_types, unique_table_num_dofs, mt_unique_table_reference = \
+            build_optimized_tables(num_points, quadrature_rules,
+                cell, integral_type, entitytype, initial_terminal_data,
+                ir["unique_tables"], p["enable_table_zero_compression"],
+                rtol=p["table_rtol"], atol=p["table_atol"])
+
+        # Replace some scalar modified terminals before reconstructing expressions
+        # (could possibly use replace() on target expressions instead)
+        z = as_ufl(0.0)
+        one = as_ufl(1.0)
+        for i, mt in zip(initial_terminal_indices, initial_terminal_data):
+            if isinstance(mt.terminal, QuadratureWeight):
+                # Replace quadrature weight with 1.0, will be added back later
+                V[i] = one
+            else:
+                # Set modified terminals with zero tables to zero
+                tr = mt_unique_table_reference.get(mt)
+                if tr is not None and tr.ttype == "zeros":
+                    V[i] = z
+
+        # Propagate expression changes using dependency list
+        for i in range(len(V)):
+            deps = [V[j] for j in V_deps[i]]
+            if deps:
+                V[i] = V[i]._ufl_expr_reconstruct_(*deps)
+
+        # Rebuild scalar target expressions and graph
+        # (this may be overkill and possible to optimize
+        # away if it turns out to be costly)
+        expressions = [V[i] for i in V_targets]
+
+        # Rebuild scalar list-based graph representation
+        SV, SV_deps, SV_targets = build_scalar_graph(expressions)
+        assert all(i < len(SV) for i in SV_targets)
+
+        # Compute factorization of arguments
+        (argument_factorizations, modified_arguments,
+             FV, FV_deps, FV_targets) = \
+            compute_argument_factorization(SV, SV_deps, SV_targets, len(tensor_shape))
+        assert len(SV_targets) == len(argument_factorizations)
+
+        # TODO: Still expecting one target variable in code generation
+        assert len(argument_factorizations) == 1
+        argument_factorization, = argument_factorizations
+
+        # Store modified arguments in analysed form
+        for i in range(len(modified_arguments)):
+            modified_arguments[i] = analyse_modified_terminal(modified_arguments[i])
+
+        # Build set of modified_terminal indices into factorized_vertices
+        modified_terminal_indices = [i for i, v in enumerate(FV)
+                                     if is_modified_terminal(v)]
+
+        # Build set of modified terminal ufl expressions
+        modified_terminals = [analyse_modified_terminal(FV[i])
+                              for i in modified_terminal_indices]
+
+        # Make it easy to get mt object from FV index
+        FV_mts = [None]*len(FV)
+        for i, mt in zip(modified_terminal_indices, modified_terminals):
+            FV_mts[i] = mt
+
+        # Mark active modified arguments
+        #active_modified_arguments = numpy.zeros(len(modified_arguments), dtype=int)
+        #for ma_indices in argument_factorization:
+        #    for j in ma_indices:
+        #        active_modified_arguments[j] = 1
+
+        # Dependency analysis
+        inv_FV_deps, FV_active, FV_piecewise, FV_varying = \
+            analyse_dependencies(FV, FV_deps, FV_targets,
+                                 modified_terminal_indices,
+                                 modified_terminals,
+                                 mt_unique_table_reference)
+
+        # Extend piecewise V with unique new FV_piecewise vertices
+        pir = ir["piecewise_ir"]
+        for i, v in enumerate(FV):
+            if FV_piecewise[i]:
+                j = pe2i.get(v)
+                if j is None:
+                    j = len(pe2i)
+                    pe2i[v] = j
+                    pir["V"].append(v)
+                    pir["V_active"].append(1)
+                    mt = FV_mts[i]
+                    if mt is not None:
+                        pir["mt_tabledata"][mt] = mt_unique_table_reference.get(mt)
+                    pir["V_mts"].append(mt)
+
+        # Extend piecewise modified_arguments list with unique new items
+        for mt in modified_arguments:
+            ma = piecewise_modified_argument_indices.get(mt)
+            if ma is None:
+                ma = len(pir["modified_arguments"])
+                pir["modified_arguments"].append(mt)
+                piecewise_modified_argument_indices[mt] = ma
+
+        # Loop over factorization terms
+        block_contributions = defaultdict(list)
+        for ma_indices, fi in sorted(argument_factorization.items()):
+            # Get a bunch of information about this term
+            rank = len(ma_indices)
+            trs = tuple(mt_unique_table_reference[modified_arguments[ai]] for ai in ma_indices)
+
+            unames = tuple(tr.name for tr in trs)
+            ttypes = tuple(tr.ttype for tr in trs)
+            assert not any(tt == "zeros" for tt in ttypes)
+
+            blockmap = tuple(tr.dofmap for tr in trs)
+
+            block_is_uniform = all(tr.is_uniform for tr in trs)
+
+            # Collect relevant restrictions to identify blocks
+            # correctly in interior facet integrals
+            block_restrictions = []
+            for i, ma in enumerate(ma_indices):
+                if trs[i].is_uniform:
+                    r = None
+                else:
+                    r = modified_arguments[ma].restriction
+                block_restrictions.append(r)
+            block_restrictions = tuple(block_restrictions)
+
+            # Store piecewise status for fi and translate
+            # index to piecewise scope if relevant
+            factor_is_piecewise = FV_piecewise[fi]
+            if factor_is_piecewise:
+                factor_index = pe2i[FV[fi]]
+            else:
+                factor_index = fi
+
+            # TODO: Add separate block modes for quadrature
+            # Both arguments in quadrature elements
+            """
+            for iq
+                fw = f*w
+                #for i
+                #    for j
+                #        B[i,j] = fw*U[i]*V[j] = 0 if i != iq or j != iq
+                BQ[iq] = B[iq,iq] = fw
+            for (iq)
+                A[iq+offset0, iq+offset1] = BQ[iq]
+            """
+            # One argument in quadrature element
+            """
+            for iq
+                fw[iq] = f*w
+                #for i
+                #    for j
+                #        B[i,j] = fw*UQ[i]*V[j] = 0 if i != iq
+                for j
+                    BQ[iq,j] = fw[iq]*V[iq,j]
+            for (iq) for (j)
+                A[iq+offset, j+offset] = BQ[iq,j]
+            """
+
+            # Decide how to handle code generation for this block
+            if p["enable_preintegration"] and (factor_is_piecewise
+                    and rank > 0 and "quadrature" not in ttypes):
+                # - Piecewise factor is an absolute prerequisite
+                # - Could work for rank 0 as well but currently doesn't
+                # - Haven't considered how quadrature elements work out
+                block_mode = "preintegrated"
+            elif p["enable_premultiplication"] and (rank > 0
+                    and all(tt in piecewise_ttypes for tt in ttypes)):
+                # Integrate functional in quadloop, scale block after quadloop
+                block_mode = "premultiplied"
+            elif p["enable_sum_factorization"]:
+                if (rank == 2 and any(tt in piecewise_ttypes for tt in ttypes)):
+                    # Partial computation in quadloop of f*u[i],
+                    # compute (f*u[i])*v[i] outside quadloop,
+                    # (or with u,v swapped)
+                    block_mode = "partial"
+                else:
+                    # Full runtime integration of f*u[i]*v[j],
+                    # can still do partial computation in quadloop of f*u[i]
+                    # but must compute (f*u[i])*v[i] as well inside quadloop.
+                    # (or with u,v swapped)
+                    block_mode = "full"
+            else:
+                # Use full runtime integration with nothing fancy going on
+                block_mode = "safe"
+
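+            # Decision summary (restating the branches above):
+            #   piecewise factor, rank > 0, no quadrature ttype  -> preintegrated
+            #   rank > 0, all argument tables piecewise          -> premultiplied
+            #   sum factorization on, rank 2, a piecewise table  -> partial
+            #   sum factorization on, otherwise                  -> full
+            #   no optimizations enabled                         -> safe
+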
+            # Carry out decision
+            if block_mode == "preintegrated":
+                # Add to contributions:
+                # P = sum_q weight*u*v;      preintegrated here
+                # B[...] = f * P[...];       generated after quadloop
+                # A[blockmap] += B[...];     generated after quadloop
+
+                cache = ir["piecewise_ir"]["preintegrated_blocks"]
+
+                block_is_transposed = False
+                pname = cache.get(unames)
+
+                # Reuse transpose to save memory
+                if p["enable_block_transpose_reuse"] and pname is None and len(unames) == 2:
+                    pname = cache.get((unames[1], unames[0]))
+                    if pname is not None:
+                        # Cache hit on transpose
+                        block_is_transposed = True
+
+                if pname is None:
+                    # Cache miss, precompute block
+                    weights = quadrature_rules[num_points][1]
+                    if integral_type == "interior_facet":
+                        ptable = integrate_block_interior_facets(weights, unames, ttypes,
+                            unique_tables, unique_table_num_dofs)
+                    else:
+                        ptable = integrate_block(weights, unames, ttypes,
+                            unique_tables, unique_table_num_dofs)
+                    ptable = clamp_table_small_numbers(ptable, rtol=p["table_rtol"], atol=p["table_atol"])
+
+                    pname = "PI%d" % (len(cache,))
+                    cache[unames] = pname
+                    unique_tables[pname] = ptable
+                    unique_table_types[pname] = "preintegrated"
+
+                assert factor_is_piecewise
+                block_unames = (pname,)
+                blockdata = preintegrated_block_data_t(block_mode, ttypes,
+                                                       factor_index, factor_is_piecewise,
+                                                       block_unames, block_restrictions,
+                                                       block_is_transposed, block_is_uniform,
+                                                       pname)
+                block_is_piecewise = True
+
+            elif block_mode == "premultiplied":
+                # Add to contributions:
+                # P = u*v;                        computed here
+                # FI = sum_q weight * f;          generated inside quadloop
+                # B[...] = FI * P[...];           generated after quadloop
+                # A[blockmap] += B[...];          generated after quadloop
+
+                cache = ir["piecewise_ir"]["premultiplied_blocks"]
+
+                block_is_transposed = False
+                pname = cache.get(unames)
+
+                # Reuse transpose to save memory
+                if p["enable_block_transpose_reuse"] and pname is None and len(unames) == 2:
+                    pname = cache.get((unames[1], unames[0]))
+                    if pname is not None:
+                        # Cache hit on transpose
+                        block_is_transposed = True
+
+                if pname is None:
+                    # Cache miss, precompute block
+                    if integral_type == "interior_facet":
+                        ptable = multiply_block_interior_facets(0, unames, ttypes, unique_tables, unique_table_num_dofs)
+                    else:
+                        ptable = multiply_block(0, unames, ttypes, unique_tables, unique_table_num_dofs)
+                    pname = "PM%d" % (len(cache,))
+                    cache[unames] = pname
+                    unique_tables[pname] = ptable
+                    unique_table_types[pname] = "premultiplied"
+
+                block_unames = (pname,)
+                blockdata = premultiplied_block_data_t(block_mode, ttypes,
+                                                       factor_index, factor_is_piecewise,
+                                                       block_unames, block_restrictions,
+                                                       block_is_transposed, block_is_uniform,
+                                                       pname)
+                block_is_piecewise = False
+
+            elif block_mode == "scaled":  # TODO: Add mode, block is piecewise but choose not to be premultiplied
+                # Add to contributions:
+                # FI = sum_q weight * f;          generated inside quadloop
+                # B[...] = FI * u * v;            generated after quadloop
+                # A[blockmap] += B[...];          generated after quadloop
+                raise NotImplementedError("scaled block mode not implemented.")
+                # (probably need mostly the same data as premultiplied, except no P table name or values)
+                block_is_piecewise = False
+
+            elif block_mode in ("partial", "full", "safe"):
+                # Translate indices to piecewise context if necessary
+                block_is_piecewise = factor_is_piecewise and not expect_weight
+                ma_data = []
+                for i, ma in enumerate(ma_indices):
+                    if trs[i].is_piecewise:
+                        ma_index = piecewise_modified_argument_indices[modified_arguments[ma]]
+                    else:
+                        block_is_piecewise = False
+                        ma_index = ma
+                    ma_data.append(ma_data_t(ma_index, trs[i]))
+
+                block_is_transposed = False  # FIXME: Handle transposes for these block types
+
+                if block_mode == "partial":
+                    # Add to contributions:
+                    # P[i] = sum_q weight * f * u[i];  generated inside quadloop
+                    # B[i,j] = P[i] * v[j];            generated after quadloop (where v is the piecewise ma)
+                    # A[blockmap] += B[...];           generated after quadloop
+
+                    # Find the first piecewise index (TODO: would the last be better? If so, just reverse the range)
+                    for i in range(rank):
+                        if trs[i].is_piecewise:
+                            piecewise_ma_index = i
+                            break
+                    assert rank == 2
+                    not_piecewise_ma_index = 1 - piecewise_ma_index
+                    block_unames = (unames[not_piecewise_ma_index],)
+                    blockdata = partial_block_data_t(block_mode,  ttypes,
+                                                     factor_index, factor_is_piecewise,
+                                                     block_unames, block_restrictions,
+                                                     block_is_transposed,
+                                                     tuple(ma_data), piecewise_ma_index)
+                elif block_mode in ("full", "safe"):
+                    # Add to contributions:
+                    # B[i] = sum_q weight * f * u[i] * v[j];  generated inside quadloop
+                    # A[blockmap] += B[i];                    generated after quadloop
+
+                    block_unames = unames
+                    blockdata = full_block_data_t(block_mode, ttypes,
+                                                  factor_index, factor_is_piecewise,
+                                                  block_unames, block_restrictions,
+                                                  block_is_transposed,
+                                                  tuple(ma_data))
+            else:
+                error("Invalid block_mode %s" % (block_mode,))
+
+            if block_is_piecewise:
+                # Insert in piecewise expr_ir
+                ir["piecewise_ir"]["block_contributions"][blockmap].append(blockdata)
+            else:
+                # Insert in varying expr_ir for this quadrature loop
+                block_contributions[blockmap].append(blockdata)
+
+        # Figure out which table names are referenced in the unstructured partition
+        active_table_names = set()
+        for i, mt in zip(modified_terminal_indices, modified_terminals):
+            tr = mt_unique_table_reference.get(mt)
+            if tr is not None and FV_active[i]:
+                active_table_names.add(tr.name)
+
+        # Figure out which table names are referenced in blocks
+        for blockmap, contributions in chain(block_contributions.items(),
+                                             ir["piecewise_ir"]["block_contributions"].items()):
+            for blockdata in contributions:
+                if blockdata.block_mode in ("preintegrated", "premultiplied"):
+                    active_table_names.add(blockdata.name)
+                elif blockdata.block_mode in ("partial", "full", "safe"):
+                    for mad in blockdata.ma_data:
+                        active_table_names.add(mad.tabledata.name)
+
+        # Record all table types before dropping tables
+        ir["unique_table_types"].update(unique_table_types)
+
+        # Drop tables not referenced from modified terminals
+        # and tables of zeros and ones
+        unused_ttypes = ("zeros", "ones", "quadrature")
+        keep_table_names = set()
+        for name in active_table_names:
+            ttype = ir["unique_table_types"][name]
+            if ttype not in unused_ttypes:
+                if name in unique_tables:
+                    keep_table_names.add(name)
+        unique_tables = { name: unique_tables[name]
+                          for name in keep_table_names }
+
+        # Add to global set of all tables
+        for name, table in unique_tables.items():
+            tbl = ir["unique_tables"].get(name)
+            if tbl is not None and not numpy.allclose(tbl, table, rtol=p["table_rtol"], atol=p["table_atol"]):
+                error("Table values mismatch with same name.")
+        ir["unique_tables"].update(unique_tables)
+
+        # Analyse active terminals to check what we'll need to generate code for
+        active_mts = []
+        for i, mt in zip(modified_terminal_indices, modified_terminals):
+            if FV_active[i]:
+                active_mts.append(mt)
+
+        # Figure out if we need to access CellCoordinate, so that the
+        # quadrature point table can be skipped otherwise
+        if integral_type == "cell":
+            need_points = any(isinstance(mt.terminal, CellCoordinate)
+                              for mt in active_mts)
+        elif integral_type in facet_integral_types:
+            need_points = any(isinstance(mt.terminal, FacetCoordinate)
+                              for mt in active_mts)
+        elif integral_type in custom_integral_types:
+            need_points = True  # TODO: Always?
+        else:
+            need_points = False
+
+        # Figure out if we need to access QuadratureWeight, so that the
+        # quadrature point table can be skipped otherwise
+        #need_weights = any(isinstance(mt.terminal, QuadratureWeight)
+        #                   for mt in active_mts)
+
+        # Count blocks of each mode
+        block_modes = defaultdict(int)
+        for blockmap, contributions in block_contributions.items():
+            for blockdata in contributions:
+                block_modes[blockdata.block_mode] += 1
+        # Debug output
+        summary = "\n".join("  %d\t%s" % (count, mode)
+                            for mode, count in sorted(block_modes.items()))
+        debug("Blocks of each mode: \n" + summary)
+
+        # If there are any blocks other than preintegrated we need weights
+        if expect_weight and any(mode != "preintegrated" for mode in block_modes):
+            need_weights = True
+        elif integral_type in custom_integral_types:
+            need_weights = True  # TODO: Always?
+        else:
+            need_weights = False
+
+        # Build IR dict for the given expressions
+        expr_ir = {}
+
+        # (array) FV-index -> UFL subexpression
+        expr_ir["V"] = FV
+
+        # (array) V indices for each input expression component in flattened order
+        expr_ir["V_targets"] = FV_targets
+
+        ### Result of factorization:
+        # (array) MA-index -> UFL expression of modified arguments
+        expr_ir["modified_arguments"] = modified_arguments
+
+        # (dict) tuple(MA-indices) -> FV-index of monomial factor
+        #expr_ir["argument_factorization"] = argument_factorization
+
+        expr_ir["block_contributions"] = block_contributions
+
+        ### Modified terminals
+        # (array) list of FV-indices to modified terminals
+        #expr_ir["modified_terminal_indices"] = modified_terminal_indices
+
+        # Dependency structure of graph:
+        # (CRSArray) FV-index -> direct dependency FV-index list
+        #expr_ir["dependencies"] = FV_deps
+
+        # (CRSArray) FV-index -> direct dependee FV-index list
+        #expr_ir["inverse_dependencies"] = inv_FV_deps
+
+        # Metadata about each vertex
+        #expr_ir["active"] = FV_active        # (array) FV-index -> bool
+        #expr_ir["V_piecewise"] = FV_piecewise  # (array) FV-index -> bool
+        expr_ir["V_varying"] = FV_varying      # (array) FV-index -> bool
+        expr_ir["V_mts"] = FV_mts
+
+        # Store mapping from modified terminal object to
+        # table data, this is used in integralgenerator
+        expr_ir["mt_tabledata"] = mt_unique_table_reference
+
+        # To emit quadrature rules only if needed
+        expr_ir["need_points"] = need_points
+        expr_ir["need_weights"] = need_weights
+
+        # Store final ir for this num_points
+        ir["varying_irs"][num_points] = expr_ir
+
+    return ir
+
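+# Illustrative shape of the ir returned above (values elided):
+#
+#   {
+#       "params": {...},                  # parsed uflacs parameters
+#       "coefficient_numbering": {...},   # ufl coefficient -> count
+#       "unique_tables": {...},           # table name -> numpy array
+#       "unique_table_types": {...},      # table name -> ttype
+#       "piecewise_ir": {...},            # expr_ir shared by all quad loops
+#       "varying_irs": {...},             # num_points -> expr_ir
+#       "all_num_points": [...],
+#   }
+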
+
+def build_scalar_graph(expressions):
+    """Build list representation of expression graph covering the given expressions.
+
+    TODO: Renaming, refactoring and cleanup of the graph building algorithms used here.
+    """
+
+    # Build the initial coarse computational graph of the expression
+    G = build_graph(expressions)
+
+    assert len(expressions) == 1, "FIXME: Multiple expressions in graph building need more work from this point on."
+
+    # Build more fine grained computational graph of scalar subexpressions
+    # TODO: Make it so that
+    #   expressions[k] <-> NV[nvs[k][:]],
+    #   len(nvs[k]) == value_size(expressions[k])
+    scalar_expressions = rebuild_with_scalar_subexpressions(G)
+
+    # Sanity check on number of scalar symbols/components
+    assert len(scalar_expressions) == sum(product(expr.ufl_shape) for expr in expressions)
+
+    # Build new list representation of graph where all
+    # vertices of V represent single scalar operations
+    e2i, V, V_targets = build_scalar_graph_vertices(scalar_expressions)
+
+    # Compute sparse dependency matrix
+    V_deps = compute_dependencies(e2i, V)
+
+    return V, V_deps, V_targets
+
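+# Illustrative: for a single scalar expression like (u + 1)*(u + 1), V holds
+# the unique scalar subexpressions (u, 1.0, u + 1, (u + 1)*(u + 1)), V_deps
+# the per-vertex dependency lists, and V_targets the root indices (the exact
+# vertex ordering is up to the graph algorithms).
+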
+
+def analyse_dependencies(V, V_deps, V_targets,
+                         modified_terminal_indices,
+                         modified_terminals,
+                         mt_unique_table_reference):
+    # Count the number of dependencies every subexpr has
+    V_depcount = compute_dependency_count(V_deps)
+
+    # Build the 'inverse' of the sparse dependency matrix
+    inv_deps = invert_dependencies(V_deps, V_depcount)
+
+    # Mark subexpressions of V that are actually needed for final result
+    active, num_active = mark_active(V_deps, V_targets)
+
+    # Build piecewise/varying markers for factorized_vertices
+    varying_ttypes = ("varying", "uniform", "quadrature")
+    varying_indices = []
+    for i, mt in zip(modified_terminal_indices, modified_terminals):
+        tr = mt_unique_table_reference.get(mt)
+        if tr is not None:
+            ttype = tr.ttype
+            # Check if table computations have revealed values varying over points.
+            # Note: "uniform" means equal across entities but possibly varying over points.
+            if ttype in varying_ttypes:
+                varying_indices.append(i)
+            else:
+                if ttype not in ("fixed", "piecewise", "ones", "zeros"):
+                    error("Invalid ttype %s" % (ttype,))
+
+        elif not is_cellwise_constant(V[i]):
+            # Keeping this check to be on the safe side,
+            # not sure which cases this will cover (if any)
+            varying_indices.append(i)
+
+    # Mark every subexpression that is computed
+    # from the spatially dependent terminals
+    varying, num_varying = mark_image(inv_deps, varying_indices)
+
+    # The rest of the subexpressions are piecewise constant (1-1=0, 1-0=1)
+    piecewise = 1 - varying
+
+    # Unmark non-active subexpressions
+    varying *= active
+    piecewise *= active
+
+    # TODO: Skip literals in both varying and piecewise
+    # nonliteral = ...
+    # varying *= nonliteral
+    # piecewise *= nonliteral
+
+    return inv_deps, active, piecewise, varying
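+
+# Illustrative sketch of the masking arithmetic above, assuming 0/1 integer
+# arrays as produced by mark_active/mark_image:
+#
+#   active    = numpy.array([1, 1, 0, 1])
+#   varying   = numpy.array([1, 0, 1, 0])
+#   piecewise = 1 - varying         # [0, 1, 0, 1]
+#   varying   = varying * active    # [1, 0, 0, 0]
+#   piecewise = piecewise * active  # [0, 1, 0, 1]
+#
+# Each active vertex thus lands in exactly one of the two partitions.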
+
+
+# TODO: Review the comments below and either act on them or delete them.
+
+""" Old comments:
+
+Work for later::
+
+        - Apply some suitable renumbering of vertices and corresponding arrays prior to returning
+
+        - Allocate separate registers for each partition
+          (but e.g. argument[iq][i0] may need to be accessible in other loops)
+
+        - Improve register allocation algorithm
+
+        - Take a list of expressions as input to compile several expressions in one joined graph
+          (e.g. to compile a,L,M together for nonlinear problems)
+
+"""
+
+
+""" # Old comments:
+
+    # TODO: Inspection of varying shows that factorization is
+    # needed for effective loop invariant code motion w.r.t. quadrature loop as well.
+    # Postponing that until everything is working fine again.
+    # Core ingredients for such factorization would be:
+    # - Flatten products of products somehow
+    # - Sorting flattened product factors by loop dependency then by canonical ordering
+    # Or to keep binary products:
+    # - Rebalancing product trees ((a*c)*(b*d) -> (a*b)*(c*d)) to make piecewise quantities 'float' to the top of the list
+
+    # rank = max(len(ma_indices) for ma_indices in argument_factorization)
+    # for i,a in enumerate(modified_arguments):
+    #    iarg = a.number()
+    # ipart = a.part()
+
+    # TODO: More structured MA organization?
+    #modified_arguments[rank][block][entry] -> UFL expression of modified argument
+    #dofranges[rank][block] -> (begin, end)
+    # or
+    #modified_arguments[rank][entry] -> UFL expression of modified argument
+    #dofrange[rank][entry] -> (begin, end)
+    #argument_factorization: (dict) tuple(MA-indices (only relevant ones!)) -> V-index of monomial factor
+    # becomes
+    #argument_factorization: (dict) tuple(entry for each(!) rank) -> V-index of monomial factor ## doesn't cover intermediate f*u in f*u*v!
+"""
+
+
+"""
+def old_code_useful_for_optimization():
+
+    # Use heuristics to mark the usefulness of storing every subexpr in a variable
+    scores = compute_cache_scores(V,
+                                  active,
+                                  dependencies,
+                                  inverse_dependencies,
+                                  partitions,  # TODO: Rewrite in terms of something else, this doesn't exist anymore
+                                  cache_score_policy=default_cache_score_policy)
+
+    # Allocate variables to store subexpressions in
+    allocations = allocate_registers(active, partitions, target_variables,
+                                     scores, int(parameters["max_registers"]), int(parameters["score_threshold"]))
+    target_registers = [allocations[r] for r in target_variables]
+    num_registers = sum(1 if x >= 0 else 0 for x in allocations)
+    # TODO: If we renumber we can allocate registers separately for each partition, which is probably a good idea.
+
+    expr_oir = {}
+    expr_oir["num_registers"] = num_registers
+    expr_oir["partitions"] = partitions
+    expr_oir["allocations"] = allocations
+    expr_oir["target_registers"] = target_registers
+    return expr_oir
+"""
+
diff --git a/ffc/uflacs/elementtables.py b/ffc/uflacs/elementtables.py
new file mode 100644
index 0000000..e77b764
--- /dev/null
+++ b/ffc/uflacs/elementtables.py
@@ -0,0 +1,641 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
+
+"""Tools for precomputed tables of terminal values."""
+
+from __future__ import print_function  # used in some debugging
+
+from collections import namedtuple
+import numpy
+
+from ufl.cell import num_cell_entities
+from ufl.utils.sequences import product
+from ufl.utils.derivativetuples import derivative_listing_to_counts
+from ufl.permutation import build_component_numbering
+from ufl.classes import FormArgument, GeometricQuantity, SpatialCoordinate, Jacobian
+from ufl.algorithms.analysis import unique_tuple
+from ufl.measure import custom_integral_types
+
+from ffc.log import error
+from ffc.fiatinterface import create_element
+from ffc.representationutils import integral_type_to_entity_dim, map_integral_points
+from ffc.representationutils import create_quadrature_points_and_weights
+from ffc.uflacs.backends.ffc.common import ufc_restriction_offset
+
+
+# Using same defaults as numpy.allclose
+default_rtol = 1e-5
+default_atol = 1e-8
+
+
+table_origin_t = namedtuple("table_origin",
+    ["element", "avg", "derivatives", "flat_component", "dofrange", "dofmap"])
+
+
+piecewise_ttypes = ("piecewise", "fixed", "ones", "zeros")
+
+
+uniform_ttypes = ("uniform", "fixed", "ones", "zeros")
+
+
+valid_ttypes = set(("quadrature",)) | set(piecewise_ttypes) | set(uniform_ttypes)
+
+
+unique_table_reference_t = namedtuple("unique_table_reference",
+    ["name", "values",
+     "dofrange", "dofmap", "original_dim",
+     "ttype", "is_piecewise", "is_uniform"])
+
+
+def equal_tables(a, b, rtol=default_rtol, atol=default_atol):
+    a = numpy.asarray(a)
+    b = numpy.asarray(b)
+    if a.shape != b.shape:
+        return False
+    else:
+        return numpy.allclose(a, b, rtol=rtol, atol=atol)
+
+
+def clamp_table_small_numbers(table, rtol=default_rtol, atol=default_atol):
+    "Clamp almost 0,1,-1 values to integers. Returns new table."
+    # Get shape of table and number of columns, defined as the last axis
+    table = numpy.asarray(table)
+    for n in (-1.0, -0.5, 0.0, 0.5, 1.0):
+        table[numpy.where(numpy.isclose(table, n, rtol=rtol, atol=atol))] = n
+    return table
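+
+# Illustrative example, assuming the default tolerances:
+#
+#   clamp_table_small_numbers([[0.9999999, 1e-12], [-0.5000001, 0.3]])
+#   # -> array([[ 1. ,  0. ],
+#   #           [-0.5,  0.3]])
+#
+# Values within tolerance of -1.0, -0.5, 0.0, 0.5 or 1.0 snap to those
+# constants; everything else (here 0.3) is left untouched.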
+
+
+def strip_table_zeros(table, compress_zeros, rtol=default_rtol, atol=default_atol):
+    "Strip zero columns from table. Returns column range (begin, end) and the new compact table."
+    # Get shape of table and number of columns, defined as the last axis
+    table = numpy.asarray(table)
+    sh = table.shape
+
+    # Find nonzero columns
+    z = numpy.zeros(sh[:-1])  # Correctly shaped zero table
+    dofmap = tuple(i for i in range(sh[-1])
+                   if not numpy.allclose(z, table[..., i], rtol=rtol, atol=atol))
+    if dofmap:
+        # Find first nonzero column
+        begin = dofmap[0]
+        # Find (one beyond) last nonzero column
+        end = dofmap[-1] + 1
+    else:
+        begin = 0
+        end = 0
+
+    # If compression is not wanted, pretend whole range is nonzero
+    if not compress_zeros:
+        dofmap = tuple(range(begin, end))
+
+    # Make subtable by dropping zero columns
+    stripped_table = table[..., dofmap]
+    dofrange = (begin, end)
+    return dofrange, dofmap, stripped_table
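+
+# Illustrative example: stripping a table whose first and last dof columns
+# are identically zero,
+#
+#   table = numpy.array([[[0.0, 0.3, 0.7, 0.0]]])  # (1 entity, 1 point, 4 dofs)
+#   dofrange, dofmap, stripped = strip_table_zeros(table, compress_zeros=True)
+#   # dofrange == (1, 3), dofmap == (1, 2), stripped.shape == (1, 1, 2)
+#
+# With compress_zeros=False the dofmap covers the whole contiguous
+# range(begin, end), so interior zero columns are kept.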
+
+
+def build_unique_tables(tables, rtol=default_rtol, atol=default_atol):
+    """Given a list or dict of tables, return a list of unique tables
+    and a dict of unique table indices for each input table key."""
+    unique = []
+    mapping = {}
+
+    if isinstance(tables, list):
+        keys = list(range(len(tables)))
+    elif isinstance(tables, dict):
+        keys = sorted(tables.keys())
+
+    for k in keys:
+        t = tables[k]
+        found = -1
+        for i, u in enumerate(unique):
+            if equal_tables(u, t, rtol=rtol, atol=atol):
+                found = i
+                break
+        if found == -1:
+            i = len(unique)
+            unique.append(t)
+        mapping[k] = i
+
+    return unique, mapping
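+
+# Illustrative example: every input key maps to the index of its
+# representative in the unique list,
+#
+#   a = numpy.ones((1, 2, 2))
+#   b = numpy.ones((1, 2, 2)) + 1e-12  # equal to a within tolerances
+#   c = numpy.zeros((1, 2, 2))
+#   unique, mapping = build_unique_tables({"ta": a, "tb": b, "tc": c})
+#   # len(unique) == 2, mapping == {"ta": 0, "tb": 0, "tc": 1}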
+
+
+def get_ffc_table_values(points,
+                         cell, integral_type,
+                         ufl_element, avg,
+                         entitytype, derivative_counts,
+                         flat_component):
+    """Extract values from ffc element table.
+
+    Returns a 3D numpy array with axes
+    (entity number, quadrature point number, dof number)
+    """
+    deriv_order = sum(derivative_counts)
+
+    if integral_type in custom_integral_types:
+        # Use quadrature points on cell for analysis in custom integral types
+        integral_type = "cell"
+        assert not avg
+
+    if avg in ("cell", "facet"):
+        # Redefine points to compute average tables
+
+        # Make sure this is not called with points; that wouldn't make sense
+        #assert points is None
+
+        # Not expecting derivatives of averages
+        assert not any(derivative_counts)
+        assert deriv_order == 0
+
+        # Doesn't matter if it's exterior or interior facet integral,
+        # just need a valid integral type to create quadrature rule
+        if avg == "cell":
+            integral_type = "cell"
+        elif avg == "facet":
+            integral_type = "exterior_facet"
+
+        # Make quadrature rule and get points and weights
+        points, weights = create_quadrature_points_and_weights(
+            integral_type, cell, ufl_element.degree(), "default")
+
+    # Tabulate table of basis functions and derivatives in points for each entity
+    fiat_element = create_element(ufl_element)
+    tdim = cell.topological_dimension()
+    entity_dim = integral_type_to_entity_dim(integral_type, tdim)
+    num_entities = num_cell_entities[cell.cellname()][entity_dim]
+    entity_tables = []
+    for entity in range(num_entities):
+        entity_points = map_integral_points(points, integral_type, cell, entity)
+        tbl = fiat_element.tabulate(deriv_order, entity_points)[derivative_counts]
+        entity_tables.append(tbl)
+
+    # Extract arrays for the right scalar component
+    component_tables = []
+    sh = ufl_element.value_shape()
+    if sh == ():
+        # Scalar valued element
+        for entity, entity_table in enumerate(entity_tables):
+            component_tables.append(entity_table)
+    elif len(sh) == 2 and ufl_element.num_sub_elements() == 0:
+        # 2-tensor-valued elements, not a tensor product
+        # mapping flat_component back to tensor component
+        (_, f2t) = build_component_numbering(sh, ufl_element.symmetry())
+        t_comp = f2t[flat_component]
+        for entity, entity_table in enumerate(entity_tables):
+            tbl = entity_table[:, t_comp[0], t_comp[1], :]
+            component_tables.append(tbl)
+    else:
+        # Vector-valued or mixed element
+        for entity, entity_table in enumerate(entity_tables):
+            tbl = entity_table[:, flat_component, :]
+            component_tables.append(tbl)
+
+    if avg in ("cell", "facet"):
+        # Compute the numeric integral of each component table
+        wsum = sum(weights)
+        for entity, tbl in enumerate(component_tables):
+            num_dofs = tbl.shape[0]
+            tbl = numpy.dot(tbl, weights) / wsum
+            tbl = numpy.reshape(tbl, (num_dofs, 1))
+            component_tables[entity] = tbl
+
+    # Loop over entities and fill table blockwise (each block = points x dofs)
+    # Reorder axes as (points, dofs) instead of (dofs, points)
+    assert len(component_tables) == num_entities
+    num_dofs, num_points = component_tables[0].shape
+    shape = (num_entities, num_points, num_dofs)
+    res = numpy.zeros(shape)
+    for entity in range(num_entities):
+        res[entity, :, :] = numpy.transpose(component_tables[entity])
+    return res
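+
+# Illustrative shapes (assuming a scalar P1 element on a triangle): a cell
+# integral with a 3-point rule gives a (1, 3, 3) array, i.e. one "entity",
+# three quadrature points and three dofs, while an exterior facet integral
+# on the same element gives (3, num_points, 3) with one block per facet.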
+
+
+def generate_psi_table_name(num_points, element_counter, averaged,
+                            entitytype, derivative_counts, flat_component):
+    """Generate a name for the psi table of the form:
+    FE#_C#_D###[_AC|_AF][_F|_V][_Q#], where '#' will be an integer value.
+
+    FE  - is a simple counter to distinguish the various bases, it will be
+          assigned in an arbitrary fashion.
+
+    C   - is the component number if any (this does not yet take into account
+          tensor valued functions)
+
+    D   - is the number of derivatives in each spatial direction if any.
+          If the element is defined in 3D, then D012 means d^3(*)/dydz^2.
+
+    AC  - marks that the element values are averaged over the cell
+
+    AF  - marks that the element values are averaged over the facet
+
+    F   - marks that the first array dimension enumerates facets on the cell
+
+    V   - marks that the first array dimension enumerates vertices on the cell
+
+    Q   - number of quadrature points, to distinguish between tables in a mixed quadrature degree setting
+
+    """
+    name = "FE%d" % element_counter
+    if flat_component is not None:
+        name += "_C%d" % flat_component
+    if any(derivative_counts):
+        name += "_D" + "".join(str(d) for d in derivative_counts)
+    name += { None: "", "cell": "_AC", "facet": "_AF" }[averaged]
+    name += { "cell": "", "facet": "_F", "vertex": "_V" }[entitytype]
+    if num_points is not None:
+        name += "_Q%d" % num_points
+    return name
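+
+# Illustrative example of the naming scheme:
+#
+#   generate_psi_table_name(4, 3, None, "facet", (0, 1, 2), 1)
+#   # -> "FE3_C1_D012_F_Q4"
+#
+# i.e. element counter 3, scalar component 1, one y- and two z-derivatives,
+# tabulated per facet with a 4-point quadrature rule.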
+
+
+def get_modified_terminal_element(mt):
+    gd = mt.global_derivatives
+    ld = mt.local_derivatives
+
+    # Extract element from FormArguments and relevant GeometricQuantities
+    if isinstance(mt.terminal, FormArgument):
+        if gd and mt.reference_value:
+            error("Global derivatives of reference values not defined.")
+        elif ld and not mt.reference_value:
+            error("Local derivatives of global values not defined.")
+        element = mt.terminal.ufl_element()
+        fc = mt.flat_component
+    elif isinstance(mt.terminal, SpatialCoordinate):
+        if mt.reference_value:
+            error("Not expecting reference value of x.")
+        if gd:
+            error("Not expecting global derivatives of x.")
+        element = mt.terminal.ufl_domain().ufl_coordinate_element()
+        if not ld:
+            fc = mt.flat_component
+        else:
+            # Actually the Jacobian expressed as reference_grad(x)
+            fc = mt.flat_component  # x-component
+            assert len(mt.component) == 1
+            assert mt.component[0] == mt.flat_component
+    elif isinstance(mt.terminal, Jacobian):
+        if mt.reference_value:
+            error("Not expecting reference value of J.")
+        if gd:
+            error("Not expecting global derivatives of J.")
+        element = mt.terminal.ufl_domain().ufl_coordinate_element()
+        # Translate component J[i,d] to x element context rgrad(x[i])[d]
+        assert len(mt.component) == 2
+        fc, d = mt.component  # x-component, derivative
+        ld = tuple(sorted((d,) + ld))
+    else:
+        return None
+
+    assert not (mt.averaged and (ld or gd))
+
+    # Change derivatives format for table lookup
+    #gdim = mt.terminal.ufl_domain().geometric_dimension()
+    #global_derivatives = derivative_listing_to_counts(gd, gdim)
+
+    # Change derivatives format for table lookup
+    tdim = mt.terminal.ufl_domain().topological_dimension()
+    local_derivatives = derivative_listing_to_counts(ld, tdim)
+
+    return element, mt.averaged, local_derivatives, fc
+
+
+def build_element_tables(num_points, quadrature_rules,
+                         cell, integral_type, entitytype,
+                         modified_terminals, rtol=default_rtol, atol=default_atol):
+    """Build the element tables needed for a list of modified terminals.
+
+    Input:
+      entitytype - str
+      modified_terminals - ordered sequence of unique modified terminals
+      FIXME: Document
+
+    Output:
+      tables - dict(name: table)
+      mt_table_names - dict(ModifiedTerminal: name)
+
+    """
+    mt_table_names = {}
+    tables = {}
+    table_origins = {}
+
+    # Add to element tables
+    analysis = {}
+    for mt in modified_terminals:
+        # FIXME: Use a namedtuple for res
+        res = get_modified_terminal_element(mt)
+        if res:
+            analysis[mt] = res
+
+    # Build element numbering using topological
+    # ordering so subelements get priority
+    from ffc.analysis import extract_sub_elements, sort_elements, _compute_element_numbers
+    all_elements = [res[0] for res in analysis.values()]
+    unique_elements = sort_elements(extract_sub_elements(all_elements))
+    element_numbers = _compute_element_numbers(unique_elements)
+
+    def add_table(res):
+        element, avg, local_derivatives, flat_component = res
+
+        # Build name for this particular table
+        element_number = element_numbers[element]
+        name = generate_psi_table_name(
+            num_points, element_number, avg,
+            entitytype, local_derivatives, flat_component)
+
+        # Extract the values of the table from ffc table format
+        if name not in tables:
+            tables[name] = get_ffc_table_values(
+                quadrature_rules[num_points][0],
+                cell, integral_type,
+                element, avg,
+                entitytype, local_derivatives, flat_component)
+
+            # Track table origin for custom integrals:
+            table_origins[name] = res
+        return name
+
+    for mt in modified_terminals:
+        res = analysis.get(mt)
+        if not res:
+            continue
+        element, avg, local_derivatives, flat_component = res
+
+        # Generate tables for each subelement in topological ordering,
+        # using same avg and local_derivatives, for each component.
+        # We want the first table to be the innermost subelement so that's
+        # the one the optimized tables get the name from and so that's
+        # the one the table origins point to for custom integrals.
+        # This results in some superfluous tables but those will be
+        # removed before code generation and it's not believed to be
+        # a bottleneck.
+        for subelement in sort_elements(extract_sub_elements([element])):
+            for fc in range(product(subelement.reference_value_shape())):
+                subres = (subelement, avg, local_derivatives, fc)
+                name_ignored = add_table(subres)
+
+        # Generate table and store table name with modified terminal
+        name = add_table(res)
+        mt_table_names[mt] = name
+
+    return tables, mt_table_names, table_origins
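+
+# Illustrative outputs (names depend on element numbering and quadrature
+# degree): for a single scalar P1 argument on a cell integral with a
+# 3-point rule, roughly
+#
+#   tables         == {"FE0_C0_Q3": <(1, 3, 3) array>}
+#   mt_table_names == {<modified terminal>: "FE0_C0_Q3"}
+#   table_origins  == {"FE0_C0_Q3": (<P1 element>, None, (0, 0), 0)}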
+
+
+def optimize_element_tables(tables, table_origins, compress_zeros, rtol=default_rtol, atol=default_atol):
+    """Optimize tables and make unique set.
+
+    Steps taken:
+
+      - clamp values that are very close to -1, 0, +1 to those values
+      - remove dofs from beginning and end of tables where values are all zero
+      - for each modified terminal, provide the dof range that a given table corresponds to
+
+    Terminology:
+      name - str, name used in input arguments here
+      table - numpy array of float values
+      stripped_table - numpy array of float values with zeroes
+                       removed from each end of dofrange
+
+    Input:
+      tables - { name: table }
+      table_origins - FIXME
+
+    Output:
+      unique_tables - { unique_name: stripped_table }
+      unique_table_origins - FIXME
+    """
+    used_names = sorted(tables)
+    compressed_tables = {}
+    table_ranges = {}
+    table_dofmaps = {}
+    table_original_num_dofs = {}
+
+    for name in used_names:
+        tbl = tables[name]
+
+        # Clamp to selected small numbers if close,
+        # (-1.0, -0.5, 0.0, 0.5 and +1.0)
+        # (i.e. 0.999999 -> 1.0 if within rtol/atol distance)
+        tbl = clamp_table_small_numbers(tbl, rtol=rtol, atol=atol)
+
+        # Store original dof dimension before compressing
+        num_dofs = tbl.shape[2]
+
+        # Strip contiguous zero blocks at the ends of all tables
+        dofrange, dofmap, tbl = strip_table_zeros(tbl, compress_zeros, rtol=rtol, atol=atol)
+
+        compressed_tables[name] = tbl
+        table_ranges[name] = dofrange
+        table_dofmaps[name] = dofmap
+        table_original_num_dofs[name] = num_dofs
+
+    # Build unique table mapping
+    unique_tables_list, name_to_unique_index = build_unique_tables(
+        compressed_tables, rtol=rtol, atol=atol)
+
+    # Build mapping of constructed table names to unique names.
+    # Picking first constructed name preserves some information
+    # about the table origins although some names may be dropped.
+    unique_names = {}
+    for name in used_names:
+        ui = name_to_unique_index[name]
+        if ui not in unique_names:
+            unique_names[ui] = name
+    table_unames = { name: unique_names[name_to_unique_index[name]]
+                     for name in name_to_unique_index }
+
+    # Build mapping from unique table name to the table itself
+    unique_tables = {}
+    for ui, tbl in enumerate(unique_tables_list):
+        uname = unique_names[ui]
+        unique_tables[uname] = tbl
+
+    unique_table_origins = {}
+    #for ui in range(len(unique_tables_list)):
+    for ui in []:  # FIXME
+        uname = unique_names[ui]
+
+        # Track table origins for runtime recomputation in custom integrals:
+        name = "name of 'smallest' element we can use to compute this table"
+        dofrange = "FIXME"  # table_ranges[name]
+        dofmap = "FIXME"  # table_dofmaps[name]
+
+        # FIXME: Make sure the "smallest" element is chosen
+        (element, avg, derivative_counts, fc) = table_origins[name]
+        unique_table_origins[uname] = table_origin_t(element, avg, derivative_counts, fc, dofrange, dofmap)
+
+    return unique_tables, unique_table_origins, table_unames, table_ranges, table_dofmaps, table_original_num_dofs
+
+
+def is_zeros_table(table, rtol=default_rtol, atol=default_atol):
+    return (product(table.shape) == 0
+            or numpy.allclose(table, numpy.zeros(table.shape), rtol=rtol, atol=atol))
+
+
+def is_ones_table(table, rtol=default_rtol, atol=default_atol):
+    return numpy.allclose(table, numpy.ones(table.shape), rtol=rtol, atol=atol)
+
+
+def is_quadrature_table(table, rtol=default_rtol, atol=default_atol):
+    num_entities, num_points, num_dofs = table.shape
+    I = numpy.eye(num_points)
+    return (num_points == num_dofs
+            and all(numpy.allclose(table[i, :, :], I, rtol=rtol, atol=atol)
+                    for i in range(num_entities)))
+
+
+def is_piecewise_table(table, rtol=default_rtol, atol=default_atol):
+    return all(numpy.allclose(table[:, 0, :], table[:, i, :], rtol=rtol, atol=atol)
+               for i in range(1, table.shape[1]))
+
+
+def is_uniform_table(table, rtol=default_rtol, atol=default_atol):
+    return all(numpy.allclose(table[0, :, :], table[i, :, :], rtol=rtol, atol=atol)
+               for i in range(1, table.shape[0]))
+
+
+def analyse_table_type(table, rtol=default_rtol, atol=default_atol):
+    num_entities, num_points, num_dofs = table.shape
+    if is_zeros_table(table, rtol=rtol, atol=atol):
+        # Table is empty or all values are 0.0
+        ttype = "zeros"
+    elif is_ones_table(table, rtol=rtol, atol=atol):
+        # All values are 1.0
+        ttype = "ones"
+    elif is_quadrature_table(table, rtol=rtol, atol=atol):
+        # Identity matrix mapping points to dofs (separately on each entity)
+        ttype = "quadrature"
+    else:
+        # Equal for all points on a given entity
+        piecewise = is_piecewise_table(table, rtol=rtol, atol=atol)
+
+        # Equal for all entities
+        uniform = is_uniform_table(table, rtol=rtol, atol=atol)
+
+        if piecewise and uniform:
+            # Constant for all points and all entities
+            ttype = "fixed"
+        elif piecewise:
+            # Constant for all points on each entity separately
+            ttype = "piecewise"
+        elif uniform:
+            # Equal on all entities
+            ttype = "uniform"
+        else:
+            # Varying over points and entities
+            ttype = "varying"
+    return ttype
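+
+# Illustrative classifications, using the (entities, points, dofs) axes:
+#
+#   analyse_table_type(numpy.zeros((2, 3, 4)))       # -> "zeros"
+#   analyse_table_type(numpy.ones((2, 3, 4)))        # -> "ones"
+#   analyse_table_type(numpy.array([numpy.eye(3)]))  # -> "quadrature"
+#   t = numpy.array([[[0.2, 0.8], [0.6, 0.4]]] * 2)  # same on both entities
+#   analyse_table_type(t)                            # -> "uniform"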
+
+
+def analyse_table_types(unique_tables, rtol=default_rtol, atol=default_atol):
+    return { uname: analyse_table_type(table, rtol=rtol, atol=atol)
+             for uname, table in unique_tables.items() }
+
+
+def build_optimized_tables(num_points, quadrature_rules,
+                           cell, integral_type, entitytype,
+                           modified_terminals, existing_tables,
+                           compress_zeros, rtol=default_rtol, atol=default_atol):
+    # Build tables needed by all modified terminals
+    tables, mt_table_names, table_origins = \
+        build_element_tables(num_points, quadrature_rules,
+            cell, integral_type, entitytype,
+            modified_terminals, rtol=rtol, atol=atol)
+
+    # Optimize tables and get table name and dofrange for each modified terminal
+    unique_tables, unique_table_origins, table_unames, table_ranges, table_dofmaps, table_original_num_dofs = \
+        optimize_element_tables(tables, table_origins, compress_zeros, rtol=rtol, atol=atol)
+
+    # Get num_dofs for all tables before they can be deleted later
+    unique_table_num_dofs = { uname: tbl.shape[2] for uname, tbl in unique_tables.items() }
+
+    # Analyze tables for properties useful for optimization
+    unique_table_ttypes = analyse_table_types(unique_tables, rtol=rtol, atol=atol)
+
+    # Compress tables that are constant along num_entities or num_points
+    for uname, tabletype in unique_table_ttypes.items():
+        if tabletype in piecewise_ttypes:
+            # Reduce table to dimension 1 along num_points axis in generated code
+            unique_tables[uname] = unique_tables[uname][:,0:1,:]
+        if tabletype in uniform_ttypes:
+            # Reduce table to dimension 1 along num_entities axis in generated code
+            unique_tables[uname] = unique_tables[uname][0:1,:,:]
+
+    # Delete tables not referenced by modified terminals
+    used_unames = set(table_unames[name] for name in mt_table_names.values())
+    unused_unames = set(unique_tables.keys()) - used_unames
+    for uname in unused_unames:
+        del unique_table_ttypes[uname]
+        del unique_tables[uname]
+
+    # Change tables to point to existing optimized tables
+    # (i.e. tables from other contexts that have been compressed to look the same)
+    name_map = {}
+    existing_names = sorted(existing_tables)
+    for uname in sorted(unique_tables):
+        utbl = unique_tables[uname]
+        for i, ename in enumerate(existing_names):
+            etbl = existing_tables[ename]
+            if equal_tables(utbl, etbl, rtol=rtol, atol=atol):
+                # Setup table name mapping
+                name_map[uname] = ename
+                # Don't visit this table again (just to avoid the processing)
+                existing_names.pop(i)
+                break
+
+    # Replace unique table names
+    for uname, ename in name_map.items():
+        unique_tables[ename] = existing_tables[ename]
+        del unique_tables[uname]
+        unique_table_ttypes[ename] = unique_table_ttypes[uname]
+        del unique_table_ttypes[uname]
+
+    # Build mapping from modified terminal to unique table with metadata
+    # { mt: (unique name,
+    #        (table dof range begin, table dof range end),
+    #        [top parent element dof index for each local index],
+    #        ttype, original_element_dim) }
+    mt_unique_table_reference = {}
+    for mt, name in list(mt_table_names.items()):
+        # Get metadata for the original table (name is not the unique name!)
+        dofrange = table_ranges[name]
+        dofmap = table_dofmaps[name]
+        original_dim = table_original_num_dofs[name]
+
+        # Map name -> uname
+        uname = table_unames[name]
+
+        # Map uname -> ename
+        ename = name_map.get(uname, uname)
+
+        # Some more metadata stored under the ename
+        ttype = unique_table_ttypes[ename]
+
+        # Add offset to dofmap and dofrange for restricted terminals
+        if mt.restriction and isinstance(mt.terminal, FormArgument):
+            # offset = 0 or number of dofs before table optimization
+            offset = ufc_restriction_offset(mt.restriction, original_dim)
+            (b, e) = dofrange
+            dofrange = (b + offset, e + offset)
+            dofmap = tuple(i + offset for i in dofmap)
+
+        # Store reference to unique table for this mt
+        mt_unique_table_reference[mt] = unique_table_reference_t(
+            ename, unique_tables[ename],
+            dofrange, dofmap, original_dim,
+            ttype, ttype in piecewise_ttypes, ttype in uniform_ttypes)
+
+    return unique_tables, unique_table_ttypes, unique_table_num_dofs, mt_unique_table_reference
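+
+# Illustrative result (hypothetical values): an entry of
+# mt_unique_table_reference for a P1 argument on a cell integral might be
+#
+#   unique_table_reference(name="FE0_C0_Q3", values=<(1, 3, 3) array>,
+#                          dofrange=(0, 3), dofmap=(0, 1, 2), original_dim=3,
+#                          ttype="uniform", is_piecewise=False, is_uniform=True)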
diff --git a/ffc/uflacs/elementtables/table_utils.py b/ffc/uflacs/elementtables/table_utils.py
deleted file mode 100644
index 9f61e23..0000000
--- a/ffc/uflacs/elementtables/table_utils.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2016 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
-
-"""Utilities for precomputed table manipulation."""
-
-from __future__ import print_function  # used in some debugging
-
-import numpy
-
-from ufl.permutation import build_component_numbering
-from ufl.cell import num_cell_entities
-
-from ffc.log import error
-from ffc.fiatinterface import create_element
-from ffc.representationutils import integral_type_to_entity_dim, map_integral_points
-from ffc.representationutils import create_quadrature_points_and_weights
-
-def equal_tables(a, b, eps):
-    "Compare tables to be equal within a tolerance."
-    a = numpy.asarray(a)
-    b = numpy.asarray(b)
-    if a.shape != b.shape:
-        return False
-    if len(a.shape) > 1:
-        return all(equal_tables(a[i], b[i], eps)
-                   for i in range(a.shape[0]))
-    def scalars_equal(x, y, eps):
-        return abs(x-y) < eps
-    return all(scalars_equal(a[i], b[i], eps)
-               for i in range(a.shape[0]))
-
-
-def clamp_table_small_integers(table, eps):
-    "Clamp almost 0,1,-1 values to integers. Returns new table."
-    # Get shape of table and number of columns, defined as the last axis
-    table = numpy.asarray(table)
-    for n in (-1, 0, 1):
-        table[numpy.where(abs(table - n) < eps)] = float(n)
-    return table
-
-
-def strip_table_zeros(table, eps):
-    "Strip zero columns from table. Returns column range (begin,end) and the new compact table."
-    # Get shape of table and number of columns, defined as the last axis
-    table = numpy.asarray(table)
-    sh = table.shape
-    nc = sh[-1]
-
-    # Find first nonzero column
-    begin = nc
-    for i in range(nc):
-        if numpy.linalg.norm(table[..., i]) > eps:
-            begin = i
-            break
-
-    # Find (one beyond) last nonzero column
-    end = begin
-    for i in range(nc-1, begin-1, -1):
-        if numpy.linalg.norm(table[..., i]) > eps:
-            end = i+1
-            break
-
-    # Make subtable by stripping first and last columns
-    stripped_table = table[..., begin:end]
-    return begin, end, stripped_table
-
-
-def build_unique_tables(tables, eps):
-    """Given a list or dict of tables, return a list of unique tables
-    and a dict of unique table indices for each input table key."""
-    unique = []
-    mapping = {}
-
-    if isinstance(tables, list):
-        keys = list(range(len(tables)))
-    elif isinstance(tables, dict):
-        keys = sorted(tables.keys())
-
-    for k in keys:
-        t = tables[k]
-        found = -1
-        for i, u in enumerate(unique):
-            if equal_tables(u, t, eps):
-                found = i
-                break
-        if found == -1:
-            i = len(unique)
-            unique.append(t)
-        mapping[k] = i
-
-    return unique, mapping
-
-
-def get_ffc_table_values(points,
-                         cell, integral_type,
-                         num_points, # TODO: Remove, not needed
-                         ufl_element, avg,
-                         entitytype, derivative_counts,
-                         flat_component, epsilon):
-    """Extract values from ffc element table.
-
-    Returns a 3D numpy array with axes
-    (entity number, quadrature point number, dof number)
-    """
-    deriv_order = sum(derivative_counts)
-
-    if avg in ("cell", "facet"):
-        # Redefine points to compute average tables
-
-        # Make sure this is not called with points, that doesn't make sense
-        #assert points is None
-        #assert num_points is None
-
-        # Not expecting derivatives of averages
-        assert not any(derivative_counts)
-        assert deriv_order == 0
-
-        # Doesn't matter if it's exterior or interior facet integral,
-        # just need a valid integral type to create quadrature rule
-        if avg == "cell":
-            integral_type = "cell"
-        elif avg == "facet":
-            integral_type = "exterior_facet"
-
-        # Make quadrature rule and get points and weights
-        points, weights = create_quadrature_points_and_weights(
-            integral_type, cell, ufl_element.degree(), "default")
-
-    # Tabulate table of basis functions and derivatives in points for each entity
-    fiat_element = create_element(ufl_element)
-    tdim = cell.topological_dimension()
-    entity_dim = integral_type_to_entity_dim(integral_type, tdim)
-    num_entities = num_cell_entities[cell.cellname()][entity_dim]
-    entity_tables = []
-    for entity in range(num_entities):
-        entity_points = map_integral_points(points, integral_type, cell, entity)
-        tbl = fiat_element.tabulate(deriv_order, entity_points)[derivative_counts]
-        entity_tables.append(tbl)
-
-    # Extract arrays for the right scalar component
-    component_tables = []
-    sh = ufl_element.value_shape()
-    if sh == ():
-        # Scalar valued element
-        for entity, entity_table in enumerate(entity_tables):
-            component_tables.append(entity_table)
-    elif len(sh) == 2 and ufl_element.num_sub_elements() == 0:
-        # 2-tensor-valued elements, not a tensor product
-        # mapping flat_component back to tensor component
-        (_, f2t) = build_component_numbering(sh, ufl_element.symmetry())
-        t_comp = f2t[flat_component]
-        for entity, entity_table in enumerate(entity_tables):
-            tbl = entity_table[:, t_comp[0], t_comp[1], :]
-            component_tables.append(tbl)
-    else:
-        # Vector-valued or mixed element
-        for entity, entity_table in enumerate(entity_tables):
-            tbl = entity_table[:, flat_component, :]
-            component_tables.append(tbl)
-
-    if avg in ("cell", "facet"):
-        # Compute numeric integral of the each component table
-        wsum = sum(weights)
-        for entity, tbl in enumerate(component_tables):
-            num_dofs = tbl.shape[0]
-            tbl = numpy.dot(tbl, weights) / wsum
-            tbl = numpy.reshape(tbl, (num_dofs, 1))
-            component_tables[entity] = tbl
-
-    # Loop over entities and fill table blockwise (each block = points x dofs)
-    # Reorder axes as (points, dofs) instead of (dofs, points)
-    assert len(component_tables) == num_entities
-    num_dofs, num_points = component_tables[0].shape
-    shape = (num_entities, num_points, num_dofs)
-    res = numpy.zeros(shape)
-    for entity in range(num_entities):
-        res[entity, :, :] = numpy.transpose(component_tables[entity])
-    return res
-
-
-def generate_psi_table_name(num_points, element_counter, averaged,
-                            entitytype, derivative_counts, flat_component):
-    """Generate a name for the psi table of the form:
-    FE#_C#_D###[_AC|_AF|][_F|V][_Q#], where '#' will be an integer value.
-
-    FE  - is a simple counter to distinguish the various bases, it will be
-          assigned in an arbitrary fashion.
-
-    C   - is the component number if any (this does not yet take into account
-          tensor valued functions)
-
-    D   - is the number of derivatives in each spatial direction if any.
-          If the element is defined in 3D, then D012 means d^3(*)/dydz^2.
-
-    AC  - marks that the element values are averaged over the cell
-
-    AF  - marks that the element values are averaged over the facet
-
-    F   - marks that the first array dimension enumerates facets on the cell
-
-    V   - marks that the first array dimension enumerates vertices on the cell
-
-    Q   - number of quadrature points, to distinguish between tables in a mixed quadrature degree setting
-
-    """
-    name = "FE%d" % element_counter
-    if flat_component is not None:
-        name += "_C%d" % flat_component
-    if any(derivative_counts):
-        name += "_D" + "".join(str(d) for d in derivative_counts)
-    name += { None: "", "cell": "_AC", "facet": "_AF" }[averaged]
-    name += { "cell": "", "facet": "_F", "vertex": "_V" }[entitytype]
-    if num_points is not None:
-        name += "_Q%d" % num_points
-    return name
diff --git a/ffc/uflacs/elementtables/terminaltables.py b/ffc/uflacs/elementtables/terminaltables.py
deleted file mode 100644
index 9dd8c90..0000000
--- a/ffc/uflacs/elementtables/terminaltables.py
+++ /dev/null
@@ -1,416 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2016 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for precomputed tables of terminal values."""
-
-import numpy
-
-from ufl.utils.sequences import product
-from ufl.utils.derivativetuples import derivative_listing_to_counts
-from ufl.permutation import build_component_numbering
-from ufl.classes import FormArgument, GeometricQuantity, SpatialCoordinate, Jacobian
-from ufl.algorithms.analysis import unique_tuple
-
-from ffc.log import error
-
-from ffc.uflacs.elementtables.table_utils import generate_psi_table_name, get_ffc_table_values
-from ffc.uflacs.elementtables.table_utils import clamp_table_small_integers, strip_table_zeros, build_unique_tables
-
-from ffc.uflacs.backends.ffc.common import ufc_restriction_offset
-
-
-class Table(object):  # TODO: Use this class for tables with metadata
-    """Table with metadata.
-
-    Valid table types:
-    "zeros"
-    "ones"
-    "quadrature"
-    "piecewise"
-    "uniform"
-    "fixed"
-    "varying"
-
-    FIXME: Document these. For now see table computation.
-    """
-    def __init__(self, name, values, tabletype):
-        self.name = name
-        self.values = values
-        self.num_entities = values.shape[0]
-        self.num_points = values.shape[1]
-        self.num_dofs = values.shape[2]
-        self.tabletype = tabletype
-
-        self.piecewise = tabletype in ("piecewise", "fixed")
-        self.uniform = tabletype in ("uniform", "fixed")
-
-
-def get_modified_terminal_element(mt):
-    gd = mt.global_derivatives
-    ld = mt.local_derivatives
-
-    # Extract element from FormArguments and relevant GeometricQuantities
-    if isinstance(mt.terminal, FormArgument):
-        if gd and mt.reference_value:
-            error("Global derivatives of reference values not defined.")
-        elif ld and not mt.reference_value:
-            error("Local derivatives of global values not defined.")
-        element = mt.terminal.ufl_element()
-        fc = mt.flat_component
-    elif isinstance(mt.terminal, SpatialCoordinate):
-        if mt.reference_value:
-            error("Not expecting reference value of x.")
-        if gd:
-            error("Not expecting global derivatives of x.")
-        element = mt.terminal.ufl_domain().ufl_coordinate_element()
-        if not ld:
-            fc = mt.flat_component
-        else:
-            # Actually the Jacobian expressed as reference_grad(x)
-            fc = mt.flat_component  # x-component
-            assert len(mt.component) == 1
-            assert mt.component[0] == mt.flat_component
-    elif isinstance(mt.terminal, Jacobian):
-        if mt.reference_value:
-            error("Not expecting reference value of J.")
-        if gd:
-            error("Not expecting global derivatives of J.")
-        element = mt.terminal.ufl_domain().ufl_coordinate_element()
-        # Translate component J[i,d] to x element context rgrad(x[i])[d]
-        assert len(mt.component) == 2
-        fc, d = mt.component  # x-component, derivative
-        ld = tuple(sorted((d,) + ld))
-    else:
-        return None
-
-    assert not (mt.averaged and (ld or gd))
-
-    # Change derivatives format for table lookup
-    #gdim = mt.terminal.ufl_domain().geometric_dimension()
-    #global_derivatives = derivative_listing_to_counts(gd, gdim)
-
-    # Change derivatives format for table lookup
-    tdim = mt.terminal.ufl_domain().topological_dimension()
-    local_derivatives = derivative_listing_to_counts(ld, tdim)
-    
-    return element, mt.averaged, local_derivatives, fc
-
-
-def build_element_tables(num_points, quadrature_rules,
-                         cell, integral_type, entitytype,
-                         modified_terminals, epsilon):
-    """Build the element tables needed for a list of modified terminals.
-
-    Input:
-      entitytype - str
-      modified_terminals - ordered sequence of unique modified terminals
-      FIXME: Document
-
-    Output:
-      tables - dict(name: table)
-      mt_table_names - dict(ModifiedTerminal: name)
-
-    """
-    mt_table_names = {}
-    tables = {}
-    table_origins = {}
-
-    # Add to element tables
-    analysis = {}
-    for mt in modified_terminals:
-        # FIXME: Use a namedtuple for res
-        res = get_modified_terminal_element(mt)
-        if res:
-            analysis[mt] = res
-
-    # Build element numbering using topological
-    # ordering so subelements get priority
-    from ffc.analysis import extract_sub_elements, sort_elements, _compute_element_numbers
-    all_elements = [res[0] for res in analysis.values()]
-    unique_elements = sort_elements(extract_sub_elements(all_elements))
-    element_numbers = _compute_element_numbers(unique_elements)
-
-    def add_table(res):
-        element, avg, local_derivatives, flat_component = res
-
-        # Build name for this particular table
-        element_number = element_numbers[element]
-        name = generate_psi_table_name(
-            num_points, element_number, avg,
-            entitytype, local_derivatives, flat_component)
-
-        # Extract the values of the table from ffc table format
-        if name not in tables:
-            tables[name] = get_ffc_table_values(
-                quadrature_rules[num_points][0],
-                cell, integral_type,
-                num_points, element, avg,
-                entitytype, local_derivatives, flat_component,
-                epsilon)
-
-            # Track table origin for custom integrals:
-            table_origins[name] = res
-        return name
-
-    for mt in modified_terminals:
-        res = analysis.get(mt)
-        if not res:
-            continue
-        element, avg, local_derivatives, flat_component = res
-
-        # Generate tables for each subelement in topological ordering,
-        # using same avg and local_derivatives, for each component.
-        # We want the first table to be the innermost subelement so that's
-        # the one the optimized tables get the name from and so that's
-        # the one the table origins point to for custom integrals.
-        # This results in some superfluous tables but those will be
-        # removed before code generation and it's not believed to be
-        # a bottleneck.
-        for subelement in sort_elements(extract_sub_elements([element])):
-            for fc in range(product(subelement.reference_value_shape())):
-                subres = (subelement, avg, local_derivatives, fc)
-                name_ignored = add_table(subres)
-
-        # Generate table and store table name with modified terminal
-        name = add_table(res)
-        mt_table_names[mt] = name
-
-    return tables, mt_table_names, table_origins
-
-
-def optimize_element_tables(tables, mt_table_names, table_origins, epsilon):
-    """Optimize tables and make unique set.
-
-    Steps taken:
-
-      - clamp values that are very close to -1, 0, +1 to those values
-      - remove dofs from beginning and end of tables where values are all zero
-      - for each modified terminal, provide the dof range that a given table corresponds to
-
-    Terminology:
-      name - str, name used in input arguments here
-      mt - modified terminal
-      table - numpy array of float values
-      stripped_table - numpy array of float values with zeroes
-                       removed from each end of dofrange
-
-    Input:
-      tables - { name: table }
-      mt_table_names - { mt: name }
-
-    Output:
-      unique_tables - { unique_name: stripped_table }
-      mt_table_ranges - { mt: (unique_name, begin, end) }
-    """
-    # Find and sort all unique table names mentioned in mt_table_names
-    used_names = set(mt_table_names.values())
-    assert None not in used_names
-    #used_names.remove(None)
-    used_names = sorted(used_names)
-
-    # Drop unused tables (if any at this point)
-    tables = { name: tables[name] for name in tables if name in used_names }
-
-    # Clamp almost -1.0, 0.0, and +1.0 values first
-    # (i.e. 0.999999 -> 1.0 if within epsilon distance)
-    for name in used_names:
-        tables[name] = clamp_table_small_integers(tables[name], epsilon)
-
-    # Strip contiguous zero blocks at the ends of all tables
-    table_ranges = {}
-    for name in used_names:
-        begin, end, stripped_table = strip_table_zeros(tables[name], epsilon)
-        tables[name] = stripped_table
-        table_ranges[name] = (begin, end)
-
-    # Build unique table mapping
-    unique_tables_list, name_to_unique_index = build_unique_tables(tables, epsilon)
-
-    # Build mapping of constructed table names to unique names.
-    # Picking first constructed name preserves some information
-    # about the table origins although some names may be dropped.
-    unique_names = {}
-    for name in used_names:
-        ui = name_to_unique_index[name]
-        if ui not in unique_names:
-            unique_names[ui] = name
-
-    # Build mapping from unique table name to the table itself
-    unique_tables = {}
-    for ui in range(len(unique_tables_list)):
-        unique_tables[unique_names[ui]] = unique_tables_list[ui]
-
-    unique_table_origins = {}
-    for ui in range(len(unique_tables_list)):
-        uname = unique_names[ui]
-        # Track table origins for runtime recomputation in custom integrals:
-        dofrange = table_ranges[uname]
-        # FIXME: Make sure the "smallest" element is chosen
-        (element, avg, derivative_counts, fc) = table_origins[name]
-        unique_table_origins[uname] = (element, avg, derivative_counts, fc, dofrange)
-
-    # Build mapping from modified terminal to compacted table and dof range
-    # { mt: (unique name, table dof range begin, table dof range end) }
-    mt_table_ranges = {}
-    for mt, name in mt_table_names.items():
-        assert name is not None
-        b, e = table_ranges[name]
-        ui = name_to_unique_index[name]
-        unique_name = unique_names[ui]
-        mt_table_ranges[mt] = (unique_name, b, e)
-
-    return unique_tables, mt_table_ranges, unique_table_origins
-
-
-def offset_restricted_table_ranges(mt_table_ranges, mt_table_names,
-                                   tables, modified_terminals):
-    # Modify dof ranges for restricted form arguments
-    # (geometry gets padded variable names instead)
-    for mt in modified_terminals:
-        if mt.restriction and isinstance(mt.terminal, FormArgument):
-            # offset = 0 or number of dofs before table optimization
-            num_original_dofs = int(tables[mt_table_names[mt]].shape[-1])
-            offset = ufc_restriction_offset(mt.restriction, num_original_dofs)
-            (unique_name, b, e) = mt_table_ranges[mt]
-            mt_table_ranges[mt] = (unique_name, b + offset, e + offset)
-    return mt_table_ranges
-
-
-def is_zeros_table(table, epsilon):
-    return (product(table.shape) == 0
-            or numpy.allclose(table, numpy.zeros(table.shape), atol=epsilon))
-
-
-def is_ones_table(table, epsilon):
-    return numpy.allclose(table, numpy.ones(table.shape), atol=epsilon)
-
-
-def is_quadrature_table(table, epsilon):
-    num_entities, num_points, num_dofs = table.shape
-    I = numpy.eye(num_points)
-    return (num_points == num_dofs
-            and all(numpy.allclose(table[i, :, :], I, atol=epsilon)
-                    for i in range(num_entities)))
-
-
-def is_piecewise_table(table, epsilon):
-    return all(numpy.allclose(table[:, 0, :], table[:, i, :], atol=epsilon)
-               for i in range(1, table.shape[1]))
-
-
-def is_uniform_table(table, epsilon):
-    return all(numpy.allclose(table[0, :, :], table[i, :, :], atol=epsilon)
-               for i in range(1, table.shape[0]))
-
-
-def analyse_table_types(unique_tables, epsilon):
-    table_types = {}
-    for unique_name, table in unique_tables.items():
-        num_entities, num_points, num_dofs = table.shape
-        if is_zeros_table(table, epsilon):
-            # Table is empty or all values are 0.0
-            tabletype = "zeros"
-        elif is_ones_table(table, epsilon):
-            # All values are 1.0
-            tabletype = "ones"
-        elif is_quadrature_table(table, epsilon):
-            # Identity matrix mapping points to dofs (separately on each entity)
-            tabletype = "quadrature"
-        else:
-            # Equal for all points on a given entity
-            piecewise = is_piecewise_table(table, epsilon)
-
-            # Equal for all entities
-            uniform = is_uniform_table(table, epsilon)
-
-            if piecewise and uniform:
-                # Constant for all points and all entities
-                tabletype = "fixed"
-            elif piecewise:
-                # Constant for all points on each entity separately
-                tabletype = "piecewise"
-            elif uniform:
-                # Equal on all entities
-                tabletype = "uniform"
-            else:
-                # Varying over points and entities
-                tabletype = "varying"
-
-        table_types[unique_name] = tabletype
-
-    return table_types
-
-
-def build_optimized_tables(num_points, quadrature_rules,
-                           cell, integral_type, entitytype,
-                           modified_terminals, parameters):
-    # Get tolerance for checking table values against 0.0 or 1.0
-    from ffc.uflacs.language.format_value import get_float_threshold
-    epsilon = get_float_threshold()
-    # FIXME: Should be epsilon from ffc parameters
-    #epsilon = parameters["epsilon"]
-
-    # Build tables needed by all modified terminals
-    tables, mt_table_names, table_origins = \
-        build_element_tables(num_points, quadrature_rules,
-            cell, integral_type, entitytype,
-            modified_terminals, epsilon)
-
-    # Optimize tables and get table name and dofrange for each modified terminal
-    unique_tables, mt_table_ranges, table_origins = \
-        optimize_element_tables(tables, mt_table_names, table_origins, epsilon)
-
-    # Analyze tables for properties useful for optimization
-    table_types = analyse_table_types(unique_tables, epsilon)
-
-
-    # Consistency checking
-    for unique_name, tabletype in table_types.items():
-        if tabletype == "zeros":
-            # All table ranges referring to this table should be empty
-            assert all(data[1] == data[2]
-                       for mt, data in mt_table_ranges.items()
-                       if data is not None and data[0] == unique_name)
-        if tabletype == "varying":
-            # No table ranges referring to this table should be averaged
-            assert all(not mt.averaged
-                       for mt, data in mt_table_ranges.items()
-                       if data is not None and data[0] == unique_name)
-
-
-    # Add offsets to dof ranges for restricted terminals
-    mt_table_ranges = offset_restricted_table_ranges(
-        mt_table_ranges, mt_table_names, tables, modified_terminals)
-
-    # Delete unused tables and compress piecewise constant tables
-    used_names = set(tabledata[0] for tabledata in mt_table_ranges.values())
-    unused_names = set(unique_tables.keys()) - used_names
-    for uname in unused_names:
-        del table_types[uname]
-        del unique_tables[uname]
-    for uname, tabletype in table_types.items():
-        if tabletype in ("piecewise", "fixed"):
-            # Reduce table to dimension 1 along num_points axis in generated code
-            unique_tables[uname] = unique_tables[uname][:,0:1,:]
-        if tabletype in ("uniform", "fixed"):
-            # Reduce table to dimension 1 along num_entities axis in generated code
-            unique_tables[uname] = unique_tables[uname][0:1,:,:]
-        if tabletype in ("zeros", "ones", "quadrature"):
-            del unique_tables[uname]
-
-    return unique_tables, mt_table_ranges, table_types
diff --git a/ffc/uflacs/generation/integralgenerator.py b/ffc/uflacs/generation/integralgenerator.py
deleted file mode 100644
index a8cf922..0000000
--- a/ffc/uflacs/generation/integralgenerator.py
+++ /dev/null
@@ -1,510 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2016 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
-
-"""Controlling algorithm for building the tabulate_tensor source structure from factorized representation."""
-
-from ufl import product
-from ufl.classes import ConstantValue, Condition
-
-from ffc.log import error, warning
-
-from ffc.uflacs.analysis.modified_terminals import analyse_modified_terminal, is_modified_terminal
-
-
-class IntegralGenerator(object):
-
-    def __init__(self, ir, backend):
-        # Store ir
-        self.ir = ir
-
-        # Backend specific plugin with attributes
-        # - language: for translating ufl operators to target language
-        # - symbols: for translating ufl operators to target language
-        # - definitions: for defining backend specific variables
-        # - access: for accessing backend specific variables
-        self.backend = backend
-
-        # Set of operator names code has been generated for,
-        # used in the end for selecting necessary includes
-        self._ufl_names = set()
-
-
-    def get_includes(self):
-        "Return list of include statements needed to support generated code."
-        includes = set()
-
-        includes.add("#include <cstring>")  # for using memset
-        #includes.add("#include <algorithm>")  # for using std::fill instead of memset
-
-        cmath_names = set((
-                "abs", "sign", "pow", "sqrt",
-                "exp", "ln",
-                "cos", "sin", "tan",
-                "acos", "asin", "atan", "atan_2",
-                "cosh", "sinh", "tanh",
-                "acosh", "asinh", "atanh",
-                "erf", "erfc",
-            ))
-
-        boost_math_names = set((
-            "bessel_j", "bessel_y", "bessel_i", "bessel_k",
-            ))
-
-        # Only return the necessary headers
-        if cmath_names & self._ufl_names:
-            includes.add("#include <cmath>")
-
-        if boost_math_names & self._ufl_names:
-            includes.add("#include <boost/math/special_functions.hpp>")
-
-        return sorted(includes)
-
-
-    def generate(self):
-        """Generate entire tabulate_tensor body.
-
-        Assumes that the code returned from here will be wrapped in a context
-        that matches a suitable version of the UFC tabulate_tensor signatures.
-        """
-        L = self.backend.language
-
-        parts = []
-        parts += self.generate_quadrature_tables()
-        parts += self.generate_element_tables()
-        parts += self.generate_tensor_reset()
-
-        # If we have integrals with different number of quadrature points,
-        # we wrap each integral in a separate scope, avoiding having to
-        # think about name clashes for now. This is a bit wasteful in that
-        # piecewise quantities are not shared, but at least it should work.
-        expr_irs = self.ir["expr_irs"]
-        all_num_points = sorted(expr_irs)
-
-        # Reset variables, separate sets for quadrature loop
-        self.vaccesses = { num_points: {} for num_points in all_num_points }
-
-        for num_points in all_num_points:
-            body = []
-            body += self.generate_unstructured_partition(num_points, "piecewise")
-            body += self.generate_dofblock_partition(num_points, "piecewise")
-            body += self.generate_quadrature_loops(num_points)
-
-            # If there are multiple quadrature rules here, just wrapping
-            # in Scope to avoid thinking about scoping issues for now.
-            # A better handling of multiple rules would be nice,
-            # in particular sharing piecewise quantities between rules.
-            if len(all_num_points) > 1:
-                parts.append(L.Scope(body))
-            else:
-                parts.extend(body)
-
-        parts += self.generate_finishing_statements()
-
-        return L.StatementList(parts)
-
-
-    def generate_quadrature_tables(self):
-        "Generate static tables of quadrature points and weights."
-        L = self.backend.language
-
-        parts = []
-
-        # No quadrature tables for custom (given argument)
-        # or point (evaluation in single vertex)
-        skip = ("custom", "cutcell", "interface", "overlap", "vertex")
-        if self.ir["integral_type"] in skip:
-            return parts
-
-        # Loop over quadrature rules
-        qrs = self.ir["quadrature_rules"]
-        for num_points in sorted(qrs):
-            points, weights = qrs[num_points]
-            assert num_points == len(weights)
-            expr_ir = self.ir["expr_irs"][num_points]
-
-            # Generate quadrature weights array
-            if expr_ir["need_weights"]:
-                wsym = self.backend.symbols.weights_array(num_points)
-                parts += [L.ArrayDecl("static const double", wsym, num_points, weights,
-                                      alignas=self.ir["alignas"])]
-
-            # Size of quadrature points depends on context, assume this is correct:
-            pdim = len(points[0])
-            assert points.shape[0] == num_points
-            assert pdim == points.shape[1]
-            #import IPython; IPython.embed()
-
-            # Generate quadrature points array
-            if pdim and expr_ir["need_points"]:
-                # Flatten array: (TODO: avoid flattening here, it makes padding harder)
-                flattened_points = points.reshape(product(points.shape))
-                psym = self.backend.symbols.points_array(num_points)
-                parts += [L.ArrayDecl("static const double", psym, num_points * pdim,
-                                      flattened_points, alignas=self.ir["alignas"])]
-
-        # Add leading comment if there are any tables
-        parts = L.commented_code_list(parts,
-            "Section for quadrature weights and points")
-        return parts
-
-
-    def generate_element_tables(self):
-        """Generate static tables with precomputed element basis
-        function values in quadrature points."""
-        L = self.backend.language
-        parts = []
-        expr_irs = self.ir["expr_irs"]
-
-        for num_points in sorted(expr_irs):
-            # Get all unique tables for this quadrature rule
-            tables = expr_irs[num_points]["unique_tables"]
-            if tables:
-                tmp = "Definitions of {0} tables for {1} quadrature points"
-                parts += [L.Comment(tmp.format(len(tables), num_points))]
-                for name in sorted(tables):
-                    # TODO: table here can actually have only 1 point,
-                    # regroup or at least fix generated comment
-                    table = tables[name]
-                    # TODO: Not padding, consider when and if to do so
-                    parts += [L.ArrayDecl("static const double", name, table.shape, table,
-                                          alignas=self.ir["alignas"])]
-        # Add leading comment if there are any tables
-        parts = L.commented_code_list(parts, [
-            "Section for precomputed element basis function values",
-            "Table dimensions: num_entities, num_points, num_dofs"])
-        return parts
-
-
-    def generate_tensor_reset(self):
-        "Generate statements for resetting the element tensor to zero."
-        L = self.backend.language
-
-        # TODO: Move this to language module, make CNode type
-        def memzero(ptrname, size):
-            tmp = "memset({ptrname}, 0, {size} * sizeof(*{ptrname}));"
-            code = tmp.format(ptrname=str(ptrname), size=size)
-            return L.VerbatimStatement(code)
-
-        # Compute tensor size
-        A = self.backend.symbols.element_tensor()
-        A_size = product(self.ir["tensor_shape"])
-
-        # Stitch it together
-        parts = [L.Comment("Reset element tensor")]
-        if A_size == 1:
-            parts += [L.Assign(A[0], L.LiteralFloat(0.0))]
-        else:
-            parts += [memzero(A, A_size)]
-        return parts
-
-
-    def generate_quadrature_loops(self, num_points):
-        "Generate all quadrature loops."
-        L = self.backend.language
-        body = []
-
-        # Generate unstructured varying partition
-        body += self.generate_unstructured_partition(num_points, "varying")
-        body = L.commented_code_list(body,
-            "Quadrature loop body setup (num_points={0})".format(num_points))
-
-        body += self.generate_dofblock_partition(num_points, "varying")
-
-        # Wrap body in loop or scope
-        if not body:
-            # Could happen for integral with everything zero and optimized away
-            parts = []
-        elif num_points == 1:
-            # For now wrapping body in Scope to avoid thinking about scoping issues
-            parts = L.commented_code_list(L.Scope(body), "Only 1 quadrature point, no loop")
-        else:
-            # Regular case: define quadrature loop
-            iq = self.backend.symbols.quadrature_loop_index(num_points)
-            np = self.backend.symbols.num_quadrature_points(num_points)
-            parts = [L.ForRange(iq, 0, np, body=body)]
-
-        return parts
-
-
-    def generate_dofblock_partition(self, num_points, partition):
-        L = self.backend.language
-
-        # TODO: Add partial blocks (T[i0] = factor_index * arg0;)
-
-        # TODO: Move piecewise blocks outside quadrature loop
-        # (Can only do this by removing weight from factor,
-        # and using that piecewise f*u*v gives that
-        # sum_q weight[q]*f*u*v == f*u*v*(sum_q weight[q]) )
-
-        # Get representation details
-        expr_ir = self.ir["expr_irs"][num_points]
-        V = expr_ir["V"]
-        modified_arguments = expr_ir["modified_arguments"]
-        block_contributions = expr_ir["block_contributions"]
-
-        vaccesses = self.vaccesses[num_points]
-        A = self.backend.symbols.element_tensor()
-
-        parts = []
-        for dofblock, contributions in sorted(block_contributions[partition].items()):
-            for data in contributions:
-                (ma_indices, factor_index, table_ranges, unames, ttypes) = data
-
-                # Add code in layers starting with innermost A[...] += product(factors)
-                rank = len(unames)
-                factors = []
-
-                # Get factor expression
-                v = V[factor_index]
-                if not (v._ufl_is_literal_ and float(v) == 1.0):
-                    factors.append(vaccesses[v])
-
-                # Get loop counter symbols to access A with
-                A_indices = []
-                for i in range(rank):
-                    if ttypes[i] == "quadrature":
-                        # Used to index A like A[iq*num_dofs + iq]
-                        ia = self.backend.symbols.quadrature_loop_index(num_points)
-                    else:
-                        # Regular dof index
-                        ia = self.backend.symbols.argument_loop_index(i)
-                    A_indices.append(ia)
-
-                # Add table access to factors, unless it's always 1.0
-                for i in range(rank):
-                    tt = ttypes[i]
-                    assert tt not in ("zeros",)
-                    if tt not in ("quadrature", "ones"):
-                        ma = ma_indices[i]
-                        access = self.backend.access(
-                            modified_arguments[ma].terminal,
-                            modified_arguments[ma],
-                            table_ranges[i],
-                            num_points)
-                        factors.append(access)
-
-                # Special case where all factors are 1.0 and dropped
-                if factors:
-                    term = L.Product(factors)
-                else:
-                    term = L.LiteralFloat(1.0)
-
-                # Format flattened index expression to access A
-                flat_index = L.flattened_indices(A_indices, self.ir["tensor_shape"])
-                body = L.AssignAdd(A[flat_index], term)
-
-                # Wrap accumulation in loop nest
-                #for i in range(rank):
-                for i in range(rank-1, -1, -1):
-                    if ttypes[i] != "quadrature":
-                        dofrange = dofblock[i]
-                        body = L.ForRange(A_indices[i], dofrange[0], dofrange[1], body=body)
-
-                # Add this block to parts
-                parts.append(body)
-
-        return parts
-
-
-    def generate_partition(self, symbol, V, partition, table_ranges, num_points):
-        L = self.backend.language
-
-        definitions = []
-        intermediates = []
-
-        vaccesses = self.vaccesses[num_points]
-
-        partition_indices = [i for i, p in enumerate(partition) if p]
-
-        for i in partition_indices:
-            v = V[i]
-
-            if is_modified_terminal(v):
-                mt = analyse_modified_terminal(v)
-
-                # Backend specific modified terminal translation
-                vaccess = self.backend.access(mt.terminal,
-                    mt, table_ranges[i], num_points)
-                vdef = self.backend.definitions(mt.terminal,
-                    mt, table_ranges[i], num_points, vaccess)
-
-                # Store definitions of terminals in list
-                assert isinstance(vdef, list)
-                definitions.extend(vdef)
-            else:
-                # Get previously visited operands (TODO: use edges of V instead of ufl_operands?)
-                vops = [vaccesses[op] for op in v.ufl_operands]
-
-                # Mapping UFL operator to target language
-                self._ufl_names.add(v._ufl_handler_name_)
-                vexpr = self.backend.ufl_to_language(v, *vops)
-
-                # TODO: Let optimized ir provide mapping of vertex indices to
-                # variable indices, marking which subexpressions to store in variables
-                # and in what order:
-                #j = variable_id[i]
-
-                # Currently instead creating a new intermediate for
-                # each subexpression except boolean conditions
-                if isinstance(v, Condition):
-                    # Inline conditions such as x < y; the condition values
-                    # 'x' and 'y' may still be stored in intermediates.
-                    # This removes the need to handle boolean intermediate variables.
-                    # With tensor-valued conditionals it may not be optimal but we
-                    # let the C++ compiler take responsibility for optimizing those cases.
-                    j = None
-                else:
-                    j = len(intermediates)
-
-                if j is not None:
-                    # Record assignment of vexpr to intermediate variable
-                    vaccess = symbol[j]
-                    intermediates.append(L.Assign(vaccess, vexpr))
-                else:
-                    # Access the inlined expression
-                    vaccess = vexpr
-
-            # Store access node for future reference
-            vaccesses[v] = vaccess
-
-        # Join terminal computation, array of intermediate expressions,
-        # and intermediate computations
-        parts = []
-        if definitions:
-            parts += definitions
-        if intermediates:
-            parts += [L.ArrayDecl("double", symbol, len(intermediates),
-                                  alignas=self.ir["alignas"])]
-            parts += intermediates
-        return parts
-
-
-    def generate_unstructured_partition(self, num_points, partition):
-        L = self.backend.language
-        expr_ir = self.ir["expr_irs"][num_points]
-        if partition == "piecewise":
-            name = "sp"
-        elif partition == "varying":
-            name = "sv"
-        arraysymbol = L.Symbol("{0}{1}".format(name, num_points))
-        parts = self.generate_partition(arraysymbol,
-                                        expr_ir["V"],
-                                        expr_ir[partition],
-                                        expr_ir["table_ranges"],
-                                        num_points)
-        parts = L.commented_code_list(parts,
-            "Unstructured %s computations" % (partition,))
-        return parts
-
-
-    def generate_finishing_statements(self):
-        """Generate finishing statements.
-
-        This includes assigning to output array if there is no integration.
-        """
-        parts = []
-
-        if self.ir["integral_type"] == "expression":
-            error("Expression generation not implemented yet.")
-            # TODO: If no integration, assuming we generate an expression, and assign results here
-            # Corresponding code from compiler.py:
-            # assign_to_variables = tfmt.output_variable_names(len(final_variable_names))
-            # parts += list(format_assignments(zip(assign_to_variables, final_variable_names)))
-
-        return parts
-
-
-"""
-    # TODO: Rather take list of vertices, not markers
-    # XXX FIXME: Fix up this function and use it instead?
-    def alternative_generate_partition(self, symbol, C, MT, partition, table_ranges, num_points):
-        L = self.backend.language
-
-        definitions = []
-        intermediates = []
-
-        # XXX FIXME: create these!
-        # C = input CRSArray representation of expression DAG
-        # MT = input list/dict of modified terminals
-
-        self.ast_variables = [None]*len(C) # FIXME: Create outside
-
-        # TODO: Get this as input instead of partition?
-        partition_indices = [i for i, p in enumerate(partition) if p]
-        for i in partition_indices:
-            row = C[i] # XXX FIXME: Get this as input
-            if len(row) == 1:
-                # Modified terminal
-                t, = row
-                mt = MT[t] # XXX FIXME: Get this as input
-                tc = mt[0]
-
-
-                if isinstance(mt.terminal, ConstantValue):
-                    # Format literal value for the chosen language
-                    modified_literal_to_ast_node = []  # silence flake8
-                    # XXX FIXME: Implement this mapping:
-                    vaccess = modified_literal_to_ast_node[tc](mt)
-                    vdef = None
-                else:
-                    # Backend specific modified terminal formatting
-                    vaccess = self.backend.access(mt.terminal,
-                        mt, table_ranges[i], num_points)
-                    vdef = self.backend.definitions(mt.terminal,
-                        mt, table_ranges[i], num_points, vaccess)
-
-                # Store definitions of terminals in list
-                if vdef is not None:
-                    definitions.append(vdef)
-
-            else:
-                # Application of operator with typecode tc to operands with indices ops
-                tc = mt[0]
-                ops = mt[1:]
-
-                # Get operand AST nodes
-                opsaccess = [self.ast_variables[k] for k in ops]
-
-                # Generate expression for this operator application
-                typecode2astnode = []  # silence flake8
-                vexpr = typecode2astnode[tc](opsaccess) # XXX FIXME: Implement this mapping
-
-                store_this_in_variable = True # TODO: Don't store all subexpressions
-                if store_this_in_variable:
-                    # Record assignment of vexpr to intermediate variable
-                    j = len(intermediates)
-                    vaccess = symbol[j]
-                    intermediates.append(L.Assign(vaccess, vexpr))
-                else:
-                    # Access the inlined expression
-                    vaccess = vexpr
-
-            # Store access string, either a variable symbol or an inlined expression
-            self.ast_variables[i] = vaccess
-
-        # Join terminal computation, array of intermediate expressions,
-        # and intermediate computations
-        parts = []
-        if definitions:
-            parts += definitions
-        if intermediates:
-            parts += [L.ArrayDecl("double", symbol, len(intermediates),
-                                  alignas=self.ir["alignas"])]
-            parts += intermediates
-        return parts
-"""
diff --git a/ffc/uflacs/integralgenerator.py b/ffc/uflacs/integralgenerator.py
new file mode 100644
index 0000000..9c171be
--- /dev/null
+++ b/ffc/uflacs/integralgenerator.py
@@ -0,0 +1,1219 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2011-2016 Martin Sandve Alnæs
+#
+# This file is part of UFLACS.
+#
+# UFLACS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# UFLACS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with UFLACS. If not, see <http://www.gnu.org/licenses/>
+
+"""Controlling algorithm for building the tabulate_tensor source structure from factorized representation."""
+
+from collections import defaultdict
+import itertools
+
+from ufl import product
+from ufl.classes import ConstantValue, Condition
+from ufl.measure import custom_integral_types, point_integral_types
+
+from ffc.log import error, warning
+
+from ffc.uflacs.build_uflacs_ir import get_common_block_data
+from ffc.uflacs.elementtables import piecewise_ttypes
+from ffc.uflacs.language.cnodes import pad_innermost_dim, pad_dim, MemZero, MemZeroRange
+
+
+class IntegralGenerator(object):
+
+    def __init__(self, ir, backend, precision):
+        # Store ir
+        self.ir = ir
+
+        # Formatting precision
+        self.precision = precision
+
+        # Backend specific plugin with attributes
+        # - language: for translating ufl operators to target language
+        # - symbols: for translating ufl operators to target language
+        # - definitions: for defining backend specific variables
+        # - access: for accessing backend specific variables
+        self.backend = backend
+
+        # Set of operator names code has been generated for,
+        # used in the end for selecting necessary includes
+        self._ufl_names = set()
+
+        # Initialize lookup tables for variable scopes
+        self.init_scopes()
+
+        # Cache of reusable blocks contributing to A
+        self.shared_blocks = {}
+
+        # Block contributions collected during generation to be added to A at the end
+        self.finalization_blocks = defaultdict(list)
+
+        # Set of counters used for assigning names to intermediate variables
+        # TODO: Should this be part of the backend symbols? Doesn't really matter now.
+        self.symbol_counters = defaultdict(int)
+
+
+    def get_includes(self):
+        "Return list of include statements needed to support generated code."
+        includes = set()
+
+        # Get std::fill used by MemZero
+        includes.add("#include <algorithm>")
+
+        # For controlling floating point environment
+        #includes.add("#include <cfenv>")
+
+        # For intel intrinsics and controlling floating point environment
+        #includes.add("#include <xmmintrin.h>")
+
+        cmath_names = set((
+                "abs", "sign", "pow", "sqrt",
+                "exp", "ln",
+                "cos", "sin", "tan",
+                "acos", "asin", "atan", "atan_2",
+                "cosh", "sinh", "tanh",
+                "acosh", "asinh", "atanh",
+                "erf", "erfc",
+            ))
+
+        boost_math_names = set((
+            "bessel_j", "bessel_y", "bessel_i", "bessel_k",
+            ))
+
+        # Only return the necessary headers
+        if cmath_names & self._ufl_names:
+            includes.add("#include <cmath>")
+
+        if boost_math_names & self._ufl_names:
+            includes.add("#include <boost/math/special_functions.hpp>")
+
+        return sorted(includes)
+
+
+    def init_scopes(self):
+        "Initialize variable scope dicts."
+        # Reset variables, separate sets for quadrature loop
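+        # (per-rule scopes are keyed by num_points; the None key holds the
+        # piecewise scope for values defined outside all quadrature loops)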
+        self.scopes = { num_points: {} for num_points in self.ir["all_num_points"] }
+        self.scopes[None] = {}
+
+
+    def set_var(self, num_points, v, vaccess):
+        """"Set a new variable in variable scope dicts.
+
+        Scope is determined by num_points which identifies the
+        quadrature loop scope or None if outside quadrature loops.
+
+        v is the ufl expression and vaccess is the CNodes
+        expression to access the value in the code.
+        """
+        self.scopes[num_points][v] = vaccess
+
+
+    def has_var(self, num_points, v):
+        """"Check if variable exists in variable scope dicts.
+
+        Return True if ufl expression v exists in the num_points scope.
+
+        NB! Does not fall back to piecewise scope.
+        """
+        return v in self.scopes[num_points]
+
+
+    def get_var(self, num_points, v):
+        """"Lookup ufl expression v in variable scope dicts.
+
+        Scope is determined by num_points which identifies the
+        quadrature loop scope or None if outside quadrature loops.
+
+        If v is not found in quadrature loop scope, the piecewise
+        scope (None) is checked.
+
+        Returns the CNodes expression to access the value in the code.
+        """
+        if v._ufl_is_literal_:
+            return self.backend.ufl_to_language(v)
+        f = self.scopes[num_points].get(v)
+        if f is None:
+            f = self.scopes[None][v]
+        return f
+
+
+    def new_temp_symbol(self, basename):
+        "Create a new code symbol named basename + running counter."
+        L = self.backend.language
+        name = "%s%d" % (basename, self.symbol_counters[basename])
+        self.symbol_counters[basename] += 1
+        return L.Symbol(name)
+
+
+    def get_temp_symbol(self, tempname, key):
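+        # Memoize temporaries by (tempname, key); 'defined' tells the caller
+        # whether code computing this symbol has already been emitted, so
+        # shared blocks are generated once and then reused.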
+        key = (tempname,) + key
+        s = self.shared_blocks.get(key)
+        defined = s is not None
+        if not defined:
+            s = self.new_temp_symbol(tempname)
+            self.shared_blocks[key] = s
+        return s, defined
+
+
+    def generate(self):
+        """Generate entire tabulate_tensor body.
+
+        Assumes that the code returned from here will be wrapped in a context
+        that matches a suitable version of the UFC tabulate_tensor signatures.
+        """
+        L = self.backend.language
+
+        # Assert that scopes are empty: expecting this to be called only once
+        assert not any(d for d in self.scopes.values())
+
+        parts = []
+
+        # TODO: Is this needed? Find a test case to check.
+        parts += [
+            #L.VerbatimStatement("_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);"),
+            #L.VerbatimStatement("std::fesetenv(FE_DFL_DISABLE_SSE_DENORMS_ENV);"),
+            ]
+
+        # Generate the tables of quadrature points and weights
+        parts += self.generate_quadrature_tables()
+
+        # Generate the tables of basis function values and preintegrated blocks
+        parts += self.generate_element_tables()
+
+        # Generate code to compute piecewise constant scalar factors
+        parts += self.generate_unstructured_piecewise_partition()
+
+        # Loop generation code will produce parts to go before quadloops,
+        # to define the quadloops, and to go after the quadloops
+        all_preparts = []
+        all_quadparts = []
+        all_postparts = []
+
+        # Go through each relevant quadrature loop
+        if self.ir["integral_type"] in custom_integral_types:
+            preparts, quadparts, postparts = \
+              self.generate_runtime_quadrature_loop()
+            all_preparts += preparts
+            all_quadparts += quadparts
+            all_postparts += postparts
+        else:
+            for num_points in self.ir["all_num_points"]:
+                # Generate code to integrate reusable blocks of final element tensor
+                preparts, quadparts, postparts = \
+                    self.generate_quadrature_loop(num_points)
+                all_preparts += preparts
+                all_quadparts += quadparts
+                all_postparts += postparts
+
+        # Generate code to finish computing reusable blocks outside quadloop
+        preparts, quadparts, postparts = \
+            self.generate_dofblock_partition(None)
+        all_preparts += preparts
+        all_quadparts += quadparts
+        all_postparts += postparts
+
+        # Generate code to fill in A
+        all_finalizeparts = []
+
+        # Generate code to set A = 0
+        #all_finalizeparts += self.generate_tensor_reset()
+
+        # Generate code to compute piecewise constant scalar factors
+        # and set A at corresponding nonzero components
+        all_finalizeparts += self.generate_preintegrated_dofblock_partition()
+
+        # Generate code to add reusable blocks B* to element tensor A
+        all_finalizeparts += self.generate_copyout_statements()
+
+        # Collect parts before, during, and after quadrature loops
+        parts += all_preparts
+        parts += all_quadparts
+        parts += all_postparts
+        parts += all_finalizeparts
+
+        return L.StatementList(parts)
+
+
+    def generate_quadrature_tables(self):
+        "Generate static tables of quadrature points and weights."
+        L = self.backend.language
+
+        parts = []
+
+        # No quadrature tables for custom (given argument)
+        # or point (evaluation in single vertex)
+        skip = custom_integral_types + point_integral_types
+        if self.ir["integral_type"] in skip:
+            return parts
+
+        alignas = self.ir["params"]["alignas"]
+        padlen = self.ir["params"]["padlen"]
+
+        # Loop over quadrature rules
+        for num_points in self.ir["all_num_points"]:
+            varying_ir = self.ir["varying_irs"][num_points]
+
+            points, weights = self.ir["quadrature_rules"][num_points]
+            assert num_points == len(weights)
+            assert num_points == points.shape[0]
+
+            # Generate quadrature weights array
+            if varying_ir["need_weights"]:
+                wsym = self.backend.symbols.weights_table(num_points)
+                parts += [L.ArrayDecl("static const double", wsym,
+                                      num_points, weights,
+                                      alignas=alignas)]
+
+            # Generate quadrature points array
+            N = product(points.shape)
+            if varying_ir["need_points"] and N:
+                # Flatten array: (TODO: avoid flattening here, it makes padding harder)
+                flattened_points = points.reshape(N)
+                psym = self.backend.symbols.points_table(num_points)
+                parts += [L.ArrayDecl("static const double", psym,
+                                      N, flattened_points,
+                                      alignas=alignas)]
+
+        # Add leading comment if there are any tables
+        parts = L.commented_code_list(parts, "Quadrature rules")
+        return parts
+
+
+    def generate_element_tables(self):
+        """Generate static tables with precomputed element basis
+        function values in quadrature points."""
+        L = self.backend.language
+        parts = []
+
+        tables = self.ir["unique_tables"]
+        table_types = self.ir["unique_table_types"]
+        inline_tables = self.ir["integral_type"] == "cell"
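+        # (for cell integrals the preintegrated "PI*" tables are inlined
+        # during code generation rather than emitted as static arrays)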
+
+        alignas = self.ir["params"]["alignas"]
+        padlen = self.ir["params"]["padlen"]
+
+        if self.ir["integral_type"] in custom_integral_types:
+            # Define only piecewise tables
+            table_names = [name for name in sorted(tables)
+                           if table_types[name] in piecewise_ttypes]
+        else:
+            # Define all tables
+            table_names = sorted(tables)
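+        # (for custom integrals the varying tables are instead filled at
+        # run time inside the chunk loop; see generate_runtime_quadrature_loop)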
+
+        for name in table_names:
+            table = tables[name]
+
+            # Don't pad preintegrated tables
+            if name[0] == "P":
+                p = 1
+            else:
+                p = padlen
+
+            # Skip tables that are inlined in code generation
+            if inline_tables and name[:2] == "PI":
+                continue
+
+            decl = L.ArrayDecl("static const double", name,
+                               table.shape, table,
+                               alignas=alignas,
+                               padlen=p)
+            parts += [decl]
+
+        # Add leading comment if there are any tables
+        parts = L.commented_code_list(parts, [
+            "Precomputed values of basis functions and precomputations",
+            "FE* dimensions: [entities][points][dofs]",
+            "PI* dimensions: [entities][dofs][dofs] or [entities][dofs]",
+            "PM* dimensions: [entities][dofs][dofs]",
+            ])
+        return parts
+
+
+    def generate_tensor_reset(self):
+        "Generate statements for resetting the element tensor to zero."
+        L = self.backend.language
+        A = self.backend.symbols.element_tensor()
+        A_shape = self.ir["tensor_shape"]
+        A_size = product(A_shape)
+        parts = [
+            L.Comment("Reset element tensor"),
+            L.MemZero(A, A_size),
+            ]
+        return parts
+
+
+    def generate_quadrature_loop(self, num_points):
+        "Generate quadrature loop with for this num_points."
+        L = self.backend.language
+
+        # Generate unstructured varying partition
+        body = self.generate_unstructured_varying_partition(num_points)
+        body = L.commented_code_list(body,
+            "Quadrature loop body setup (num_points={0})".format(num_points))
+
+        # Generate dofblock parts, some of this
+        # will be placed before or after quadloop
+        preparts, quadparts, postparts = \
+            self.generate_dofblock_partition(num_points)
+        body += quadparts
+
+        # Wrap body in loop or scope
+        if not body:
+            # Could happen for integral with everything zero and optimized away
+            quadparts = []
+        elif num_points == 1:
+            # For now wrapping body in Scope to avoid thinking about scoping issues
+            quadparts = L.commented_code_list(L.Scope(body),
+                "Only 1 quadrature point, no loop")
+        else:
+            # Regular case: define quadrature loop
+            # (num_points == 1 was handled above, so a real loop is needed)
+            iq = self.backend.symbols.quadrature_loop_index()
+            quadparts = [L.ForRange(iq, 0, num_points, body=body)]
+
+        return preparts, quadparts, postparts
+
+
+    def generate_runtime_quadrature_loop(self):
+        "Generate quadrature loop for custom integrals, with physical points given runtime."
+        L = self.backend.language
+
+        assert self.ir["integral_type"] in custom_integral_types
+
+        num_points = self.ir["fake_num_points"]
+        chunk_size = self.ir["params"]["chunk_size"]
+
+        gdim = self.ir["geometric_dimension"]
+        tdim = self.ir["topological_dimension"]
+
+        alignas = self.ir["params"]["alignas"]
+        #padlen = self.ir["params"]["padlen"]
+
+        tables = self.ir["unique_tables"]
+        table_types = self.ir["unique_table_types"]
+        #table_origins = self.ir["unique_table_origins"]  # FIXME
+
+        # Generate unstructured varying partition
+        body = self.generate_unstructured_varying_partition(num_points)
+        body = L.commented_code_list(body,
+            ["Run-time quadrature loop body setup",
+             "(chunk_size={0}, analysis_num_points={1})".format(
+                 chunk_size, num_points)])
+
+        # Generate dofblock parts, some of this
+        # will be placed before or after quadloop
+        preparts, quadparts, postparts = \
+            self.generate_dofblock_partition(num_points)
+        body += quadparts
+
+        # Wrap body in loop
+        if not body:
+            # Could happen for integral with everything zero and optimized away
+            quadparts = []
+        else:
+            rule_parts = []
+
+            # Define two-level quadrature loop; over chunks then over points in chunk
+            iq_chunk = L.Symbol("iq_chunk")
+            np = self.backend.symbols.num_custom_quadrature_points()
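+            # Number of chunks, ceil(np / chunk_size) computed with
+            # integer arithmetic in the generated code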
+            num_point_blocks = (np + chunk_size - 1) / chunk_size
+            iq = self.backend.symbols.quadrature_loop_index()
+
+            # Not assuming the runtime size to be a multiple of the chunk size
+            num_points_in_block = L.Symbol("num_points_in_chunk")
+            decl = L.VariableDecl("const int", num_points_in_block,
+                                  L.Call("min", (chunk_size, np - iq_chunk * chunk_size)))
+            rule_parts.append(decl)
+
+            iq_body = L.ForRange(iq, 0, num_points_in_block, body=body)
+
+
+            ### Preparations for quadrature rules
+            varying_ir = self.ir["varying_irs"][num_points]
+
+            # Copy quadrature weights for this chunk
+            if varying_ir["need_weights"]:
+                cwsym = self.backend.symbols.custom_quadrature_weights()
+                wsym = self.backend.symbols.custom_weights_table()
+                rule_parts += [
+                    L.ArrayDecl("double", wsym, chunk_size, 0,
+                                alignas=alignas),
+                    L.ForRange(iq, 0, num_points_in_block,
+                               body=L.Assign(wsym[iq], cwsym[chunk_size*iq_chunk + iq])),
+                    ]
+
+            # Copy quadrature points for this chunk
+            if varying_ir["need_points"]:
+                cpsym = self.backend.symbols.custom_quadrature_points()
+                psym = self.backend.symbols.custom_points_table()
+                rule_parts += [
+                    L.ArrayDecl("double", psym, chunk_size * gdim, 0,
+                                alignas=alignas),
+                    L.ForRange(iq, 0, num_points_in_block,
+                        body=[L.Assign(psym[iq*gdim + i],
+                                       cpsym[chunk_size*iq_chunk*gdim + iq*gdim + i])
+                              for i in range(gdim)])
+                    ]
+
+            # Add leading comment if there are any tables
+            rule_parts = L.commented_code_list(rule_parts,
+                "Quadrature weights and points")
+
+
+            ### Preparations for element tables
+            table_parts = []
+
+            # Only declare non-piecewise tables, computed inside chunk loop
+            non_piecewise_tables = [name for name in sorted(tables)
+                                    if table_types[name] not in piecewise_ttypes]
+            for name in non_piecewise_tables:
+                table = tables[name]
+                decl = L.ArrayDecl("double", name,
+                                   (1, chunk_size, table.shape[2]), 0,
+                                   alignas=alignas)  # padlen=padlen)
+                table_parts += [decl]
+
+            table_parts += [L.Comment("FIXME: Fill element tables here")]
+            #table_origins
+
+            ### Gather all in chunk loop
+            chunk_body = rule_parts + table_parts + [iq_body]
+            quadparts = [L.ForRange(iq_chunk, 0, num_point_blocks, body=chunk_body)]
+
+        return preparts, quadparts, postparts
+
+
+    def generate_unstructured_piecewise_partition(self):
+        L = self.backend.language
+
+        num_points = None
+        expr_ir = self.ir["piecewise_ir"]
+
+        name = "sp"
+        arraysymbol = L.Symbol(name)
+        parts = self.generate_partition(arraysymbol,
+                                        expr_ir["V"],
+                                        expr_ir["V_active"],
+                                        expr_ir["V_mts"],
+                                        expr_ir["mt_tabledata"],
+                                        num_points)
+        parts = L.commented_code_list(parts,
+            "Unstructured piecewise computations")
+        return parts
+
+
+    def generate_unstructured_varying_partition(self, num_points):
+        L = self.backend.language
+
+        expr_ir = self.ir["varying_irs"][num_points]
+
+        name = "sv"
+        arraysymbol = L.Symbol("%s%d" % (name, num_points))
+        parts = self.generate_partition(arraysymbol,
+                                        expr_ir["V"],
+                                        expr_ir["V_varying"],
+                                        expr_ir["V_mts"],
+                                        expr_ir["mt_tabledata"],
+                                        num_points)
+        parts = L.commented_code_list(parts,
+            "Unstructured varying computations for num_points=%d" % (num_points,))
+        return parts
+
+
+    def generate_partition(self, symbol, V, V_active, V_mts, mt_tabledata, num_points):
+        L = self.backend.language
+
+        definitions = []
+        intermediates = []
+
+        active_indices = [i for i, p in enumerate(V_active) if p]
+
+        for i in active_indices:
+            v = V[i]
+            mt = V_mts[i]
+
+            if v._ufl_is_literal_:
+                vaccess = self.backend.ufl_to_language(v)
+            elif mt is not None:
+                # All finite element based terminals have table data, as do
+                # some but not all of the symbolic geometric terminals
+                tabledata = mt_tabledata.get(mt)
+
+                # Backend specific modified terminal translation
+                vaccess = self.backend.access(mt.terminal,
+                    mt, tabledata, num_points)
+                vdef = self.backend.definitions(mt.terminal,
+                    mt, tabledata, num_points, vaccess)
+
+                # Store definitions of terminals in list
+                assert isinstance(vdef, list)
+                definitions.extend(vdef)
+            else:
+                # Get previously visited operands
+                vops = [self.get_var(num_points, op) for op in v.ufl_operands]
+
+                # Mapping UFL operator to target language
+                self._ufl_names.add(v._ufl_handler_name_)
+                vexpr = self.backend.ufl_to_language(v, *vops)
+
+                # TODO: Let optimized ir provide mapping of vertex indices to
+                # variable indices, marking which subexpressions to store in variables
+                # and in what order:
+                #j = variable_id[i]
+
+                # Currently instead creating a new intermediate for
+                # each subexpression except boolean conditions
+                if isinstance(v, Condition):
+                    # Inline conditions such as x < y; the condition values
+                    # 'x' and 'y' may still be stored in intermediates.
+                    # This removes the need to handle boolean intermediate variables.
+                    # With tensor-valued conditionals it may not be optimal but we
+                    # let the C++ compiler take responsibility for optimizing those cases.
+                    j = None
+                elif any(op._ufl_is_literal_ for op in v.ufl_operands):
+                    # Skip intermediates for e.g. -2.0*x,
+                    # resulting in lines like z = y + -2.0*x
+                    j = None
+                else:
+                    j = len(intermediates)
+
+                if j is not None:
+                    # Record assignment of vexpr to intermediate variable
+                    if self.ir["params"]["use_symbol_array"]:
+                        vaccess = symbol[j]
+                        intermediates.append(L.Assign(vaccess, vexpr))
+                    else:
+                        vaccess = L.Symbol("%s_%d" % (symbol.name, j))
+                        intermediates.append(L.VariableDecl("const double", vaccess, vexpr))
+                else:
+                    # Access the inlined expression
+                    vaccess = vexpr
+
+            # Store access node for future reference
+            self.set_var(num_points, v, vaccess)
+
+        # Join terminal computation, array of intermediate expressions,
+        # and intermediate computations
+        parts = []
+        if definitions:
+            parts += definitions
+        if intermediates:
+            if self.ir["params"]["use_symbol_array"]:
+                alignas = self.ir["params"]["alignas"]
+                parts += [L.ArrayDecl("double", symbol,
+                                      len(intermediates),
+                                      alignas=alignas)]
+            parts += intermediates
+        return parts
+
+
+    def generate_dofblock_partition(self, num_points):
+        L = self.backend.language
+
+        if num_points is None:  # NB! None meaning piecewise partition, not custom integral
+            block_contributions = self.ir["piecewise_ir"]["block_contributions"]
+        else:
+            block_contributions = self.ir["varying_irs"][num_points]["block_contributions"]
+
+        preparts = []
+        quadparts = []
+        postparts = []
+
+        blocks = [(blockmap, blockdata)
+                  for blockmap, contributions in sorted(block_contributions.items())
+                  for blockdata in contributions
+                  if blockdata.block_mode != "preintegrated"]
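+        # (preintegrated blocks never enter the quadrature loops; they are
+        # added to A during finalization via
+        # generate_preintegrated_dofblock_partition(), called from generate())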
+
+        for blockmap, blockdata in blocks:
+            # Get symbol for already defined block B if it exists
+            common_block_data = get_common_block_data(blockdata)
+            B = self.shared_blocks.get(common_block_data)
+            if B is None:
+                # Define code for block depending on mode
+                B, block_preparts, block_quadparts, block_postparts = \
+                    self.generate_block_parts(num_points, blockmap, blockdata)
+
+                # Add definitions
+                preparts.extend(block_preparts)
+
+                # Add computations
+                quadparts.extend(block_quadparts)
+
+                # Add finalization
+                postparts.extend(block_postparts)
+
+                # Store reference for reuse
+                self.shared_blocks[common_block_data] = B
+
+            # Add A[blockmap] += B[...] to finalization
+            self.finalization_blocks[blockmap].append(B)
+
+        return preparts, quadparts, postparts
+
+
+    def get_entities(self, blockdata):
+        L = self.backend.language
+
+        if self.ir["integral_type"] == "interior_facet":
+            # Get the facet entities
+            entities = []
+            for r in blockdata.restrictions:
+                if r is None:
+                    entities.append(0)
+                else:
+                    entities.append(self.backend.symbols.entity(self.ir["entitytype"], r))
+            if blockdata.transposed:
+                return (entities[1], entities[0])
+            else:
+                return tuple(entities)
+        else:
+            # Get the current cell or facet entity
+            if blockdata.is_uniform:
+                # uniform, i.e. constant across facets
+                entity = L.LiteralInt(0)
+            else:
+                entity = self.backend.symbols.entity(self.ir["entitytype"], None)
+            return (entity,)
+
+
+    def get_arg_factors(self, blockdata, block_rank, num_points, iq, indices):
+        L = self.backend.language
+
+        arg_factors = []
+        for i in range(block_rank):
+            mad = blockdata.ma_data[i]
+            td = mad.tabledata
+            if td.is_piecewise:
+                scope = self.ir["piecewise_ir"]["modified_arguments"]
+            else:
+                scope = self.ir["varying_irs"][num_points]["modified_arguments"]
+            mt = scope[mad.ma_index]
+
+            # Translate modified terminal to code
+            # TODO: Move element table access out of backend?
+            #       Not using self.backend.access.argument() here
+            #       now because it assumes too much about indices.
+
+            table = self.backend.symbols.element_table(td,
+                self.ir["entitytype"], mt.restriction)
+
+            assert td.ttype != "zeros"
+
+            if td.ttype == "ones":
+                arg_factor = L.LiteralFloat(1.0)
+            elif td.ttype == "quadrature":  # TODO: Revisit all quadrature ttype checks
+                arg_factor = table[iq]
+            else:
+                # Assuming B sparsity follows element table sparsity
+                arg_factor = table[indices[i]]
+            arg_factors.append(arg_factor)
+        return arg_factors
+
+
+    def generate_block_parts(self, num_points, blockmap, blockdata):
+        """Generate and return code parts for a given block.
+
+        Returns parts occurring before, inside, and after
+        the quadrature loop identified by num_points.
+
+        Should be called with num_points=None for quadloop-independent blocks.
+        """
+        L = self.backend.language
+
+        # The parts to return
+        preparts = []
+        quadparts = []
+        postparts = []
+
+        # TODO: Define names in backend symbols?
+        #tempnames = self.backend.symbols.block_temp_names
+        #blocknames = self.backend.symbols.block_names
+        tempnames = {
+            #"preintegrated": "TI",
+            "premultiplied": "TM",
+            "partial": "TP",
+            "full": "TF",
+            "safe": "TS",
+            "quadrature": "TQ",
+            }
+        blocknames = {
+            #"preintegrated": "BI",
+            #"premultiplied": "BM",
+            #"partial": "BP",
+            "full": "BF",
+            "safe": "BS",
+            "quadrature": "BQ",
+            }
+
+        fwtempname = "fw"
+        tempname = tempnames.get(blockdata.block_mode)
+
+        alignas = self.ir["params"]["alignas"]
+        padlen = self.ir["params"]["padlen"]
+
+        block_rank = len(blockmap)
+        blockdims = tuple(len(dofmap) for dofmap in blockmap)
+        padded_blockdims = pad_innermost_dim(blockdims, padlen)
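+        # NB: assuming pad_innermost_dim rounds the innermost block dimension
+        # up to a multiple of padlen, so the innermost loops below run over
+        # padded ranges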
+
+        ttypes = blockdata.ttypes
+        if "zeros" in ttypes:
+            error("Not expecting zero arguments to be left in dofblock generation.")
+
+        if num_points is None:
+            iq = None
+        elif num_points == 1:
+            iq = 0
+        else:
+            iq = self.backend.symbols.quadrature_loop_index()
+
+        # Override dof index with quadrature loop index for arguments with
+        # quadrature element, to index B like B[iq*num_dofs + iq]
+        arg_indices = tuple(self.backend.symbols.argument_loop_index(i)
+                            for i in range(block_rank))
+        B_indices = []
+        for i in range(block_rank):
+            if ttypes[i] == "quadrature":
+                B_indices.append(iq)
+            else:
+                B_indices.append(arg_indices[i])
+        B_indices = tuple(B_indices)
+
+        # Define unique block symbol
+        blockname = blocknames.get(blockdata.block_mode)
+        if blockname:
+            B = self.new_temp_symbol(blockname)
+            # Add initialization of this block to parts
+            # For all modes, block definition occurs before quadloop
+            preparts.append(L.ArrayDecl("double", B, blockdims, 0,
+                                        alignas=alignas,
+                                        padlen=padlen))
+
+        # Get factor expression
+        if blockdata.factor_is_piecewise:
+            v = self.ir["piecewise_ir"]["V"][blockdata.factor_index]
+        else:
+            v = self.ir["varying_irs"][num_points]["V"][blockdata.factor_index]
+        f = self.get_var(num_points, v)
+
+        # Quadrature weight was removed in representation, add it back now
+        if num_points is None:
+            weight = L.LiteralFloat(1.0)
+        elif self.ir["integral_type"] in custom_integral_types:
+            weights = self.backend.symbols.custom_weights_table()
+            weight = weights[iq]
+        else:
+            weights = self.backend.symbols.weights_table(num_points)
+            weight = weights[iq]
+
+        # Define fw = f * weight
+        if blockdata.block_mode in ("safe", "full", "partial"):
+            assert not blockdata.transposed, "Not handled yet"
+
+            # Fetch code to access modified arguments
+            arg_factors = self.get_arg_factors(blockdata, block_rank, num_points, iq, B_indices)
+
+            fw_rhs = L.float_product([f, weight])
+            if not isinstance(fw_rhs, L.Product):
+                fw = fw_rhs
+            else:
+                # Define and cache scalar temp variable
+                key = (num_points, blockdata.factor_index, blockdata.factor_is_piecewise)
+                fw, defined = self.get_temp_symbol(fwtempname, key)
+                if not defined:
+                    quadparts.append(L.VariableDecl("const double", fw, fw_rhs))
+
+                # Plan for vectorization of fw computations over iq:
+                # 1) Define fw as arrays e.g. "double fw0[nq];" outside quadloop
+                # 2) Access as fw0[iq] of course
+                # 3) Split quadrature loops, one for fw computation and one for blocks
+                # 4) Pad quadrature rule with 0 weights and last point
+
+                # Plan for vectorization of coefficient evaluation over iq:
+                # 1) Define w0_c1 etc as arrays e.g. "double w0_c1[nq] = {};" outside quadloop
+                # 2) Access as w0_c1[iq] of course
+                # 3) Split quadrature loops, coefficients before fw computation
+                # 4) Possibly swap loops over iq and ic:
+                #    for(ic) for(iq) w0_c1[iq] = w[0][ic] * FE[iq][ic];
+
+        if blockdata.block_mode == "safe":
+            # Naively accumulate integrand for this block in the innermost loop
+            assert not blockdata.transposed
+            B_rhs = L.float_product([fw] + arg_factors)
+            body = L.AssignAdd(B[B_indices], B_rhs)  # NB! += not =
+            for i in reversed(range(block_rank)):
+                body = L.ForRange(B_indices[i], 0, padded_blockdims[i], body=body)
+            quadparts += [body]
+
+            # Define rhs expression for A[blockmap[arg_indices]] += A_rhs
+            A_rhs = B[arg_indices]
+
+        elif blockdata.block_mode == "full":
+            assert not blockdata.transposed, "Not handled yet"
+
+            if block_rank < 2:
+                # Multiply collected factors
+                B_rhs = L.float_product([fw] + arg_factors)
+            else:
+                # TODO: Pick arg with smallest dimension, or pick
+                # based on global optimization to reuse more blocks
+                i = 0  # Index selected for precomputation
+                j = 1 - i
+
+                P_index = B_indices[i]
+
+                key = (num_points, blockdata.factor_index, blockdata.factor_is_piecewise,
+                       arg_factors[i].ce_format(self.precision))
+                P, defined = self.get_temp_symbol(tempname, key)
+                if not defined:
+                    # TODO: If FE table is varying and only used in contexts
+                    # where it's multiplied by weight, we can premultiply it!
+                    # Then this would become P = f * preweighted_FE_table[:].
+
+                    # Define and compute intermediate value
+                    # P[:] = (weight * f) * args[i][:]
+                    # inside quadrature loop
+                    P_dim = blockdims[i]
+                    quadparts.append(L.ArrayDecl("double", P, P_dim, None,
+                                                 alignas=alignas,
+                                                 padlen=padlen))
+                    P_rhs = L.float_product([fw, arg_factors[i]])
+                    body = L.Assign(P[P_index], P_rhs)
+                    #if ttypes[i] != "quadrature":  # FIXME: What does this mean here?
+                    vectorize = self.ir["params"]["vectorize"]
+                    body = L.ForRange(P_index, 0, P_dim,
+                                      body=body, vectorize=vectorize)
+                    quadparts.append(body)
+
+                B_rhs = P[P_index] * arg_factors[j]
+
+            # Add result to block inside quadloop
+            body = L.AssignAdd(B[B_indices], B_rhs)  # NB! += not =
+            for i in reversed(range(block_rank)):
+                # Vectorize only the innermost loop
+                vectorize = self.ir["params"]["vectorize"] and (i == block_rank - 1)
+                if ttypes[i] != "quadrature":
+                    body = L.ForRange(B_indices[i], 0, padded_blockdims[i],
+                                      body=body, vectorize=vectorize)
+            quadparts += [body]
+
+            # Define rhs expression for A[blockmap[arg_indices]] += A_rhs
+            A_rhs = B[arg_indices]
+
+        elif blockdata.block_mode == "partial":
+            # TODO: To handle transpose here, must add back intermediate block B
+            assert not blockdata.transposed, "Not handled yet"
+
+            # Get indices and dimensions right here...
+            assert block_rank == 2
+            i = blockdata.piecewise_ma_index
+            not_piecewise_index = 1 - i
+
+            P_index = arg_indices[not_piecewise_index]
+
+            key = (num_points, blockdata.factor_index, blockdata.factor_is_piecewise,
+                   arg_factors[not_piecewise_index].ce_format(self.precision))
+            P, defined = self.get_temp_symbol(tempname, key)
+            if not defined:
+                # Declare P table in preparts
+                P_dim = blockdims[not_piecewise_index]
+                preparts.append(L.ArrayDecl("double", P, P_dim, 0,
+                                            alignas=alignas,
+                                            padlen=padlen))
+
+                # Multiply collected factors
+                P_rhs = L.float_product([fw, arg_factors[not_piecewise_index]])
+
+                # Accumulate P += weight * f * args in quadrature loop
+                body = L.AssignAdd(P[P_index], P_rhs)
+                body = L.ForRange(P_index, 0, pad_dim(P_dim, padlen), body=body)
+                quadparts.append(body)
+
+            # Define B = B_rhs = piecewise_argument[:] * P[:], where P[:] = sum_q weight * f * other_argument[:]
+            B_rhs = arg_factors[i] * P[P_index]
+
+            # Define rhs expression for A[blockmap[arg_indices]] += A_rhs
+            A_rhs = B_rhs
+
+        elif blockdata.block_mode in ("premultiplied", "preintegrated"):
+            P_entity_indices = self.get_entities(blockdata)
+            if blockdata.transposed:
+                P_block_indices = (arg_indices[1], arg_indices[0])
+            else:
+                P_block_indices = arg_indices
+            P_ii = P_entity_indices + P_block_indices
+
+            if blockdata.block_mode == "preintegrated":
+                # Preintegrated should never get into quadloops
+                assert num_points is None
+
+                # Define B = B_rhs = f * PI where PI = sum_q weight * u * v
+                PI = L.Symbol(blockdata.name)[P_ii]
+                B_rhs = L.float_product([f, PI])
+
+            elif blockdata.block_mode == "premultiplied":
+                key = (num_points, blockdata.factor_index, blockdata.factor_is_piecewise)
+                FI, defined = self.get_temp_symbol(tempname, key)
+                if not defined:
+                    # Declare FI = 0 before quadloop
+                    preparts += [L.VariableDecl("double", FI, 0)]
+                    # Accumulate FI += weight * f in quadparts
+                    quadparts += [L.AssignAdd(FI, L.float_product([weight, f]))]
+
+                # Define B_rhs = FI * PM where FI = sum_q weight*f, and PM = u * v
+                PM = L.Symbol(blockdata.name)[P_ii]
+                B_rhs = L.float_product([FI, PM])
+
+            # Define rhs expression for A[blockmap[arg_indices]] += A_rhs
+            A_rhs = B_rhs
+
+        return A_rhs, preparts, quadparts, postparts
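
For orientation, the "partial" mode factors the piecewise argument out of the
quadrature sum: A[i, j] = v[i] * P[j] with P[j] = sum_q w_q f_q u_q[j]. A minimal
numpy sketch of that identity (illustrative data only, not FFC API):

    import numpy

    nq, m, n = 4, 3, 2
    wf = numpy.random.rand(nq)     # weight times factor, one value per point
    v = numpy.random.rand(m)       # piecewise argument table (constant in q)
    u = numpy.random.rand(nq, n)   # varying argument table

    A_naive = numpy.einsum("q,i,qj->ij", wf, v, u)  # accumulate per point
    P = numpy.dot(wf, u)                            # P[j] = sum_q wf[q]*u[q,j]
    assert numpy.allclose(A_naive, numpy.outer(v, P))
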
+
+
+    def generate_preintegrated_dofblock_partition(self):
+        # FIXME: Generalize this to unrolling all A[] += ... loops, or all loops with noncontiguous DM??
+        L = self.backend.language
+        block_contributions = self.ir["piecewise_ir"]["block_contributions"]
+
+        blocks = [(blockmap, blockdata)
+                  for blockmap, contributions in sorted(block_contributions.items())
+                  for blockdata in contributions
+                  if blockdata.block_mode == "preintegrated"]
+
+        # Get symbol, dimensions, and loop index symbols for A
+        A_shape = self.ir["tensor_shape"]
+        A_size = product(A_shape)
+        A_rank = len(A_shape)
+
+        A_strides = [1]*A_rank  # TODO: there's something like shape2strides(A_shape) somewhere
+        for i in reversed(range(0, A_rank-1)):
+            A_strides[i] = A_strides[i+1] * A_shape[i+1]
+
+        A = self.backend.symbols.element_tensor()
+        #A = L.FlattenedArray(A, dims=A_shape)
+
+        A_values = [0.0] * A_size
+
+        for blockmap, blockdata in blocks:
+            # Accumulate A[blockmap[...]] += f*PI[...]
+
+            # Get table for inlining
+            tables = self.ir["unique_tables"]
+            table = tables[blockdata.name]
+            inline_table = self.ir["integral_type"] == "cell"
+
+            # Get factor expression
+            v = self.ir["piecewise_ir"]["V"][blockdata.factor_index]
+            f = self.get_var(None, v)
+
+            # Define rhs expression for A[blockmap[arg_indices]] += A_rhs
+            # A_rhs = f * PI where PI = sum_q weight * u * v
+            PI = L.Symbol(blockdata.name)
+            block_rank = len(blockmap)
+
+            # Override dof index with quadrature loop index for arguments with
+            # quadrature element, to index B like B[iq*num_dofs + iq]
+            arg_indices = tuple(self.backend.symbols.argument_loop_index(i)
+                                for i in range(block_rank))
+
+            # Define indices into preintegrated block
+            P_entity_indices = self.get_entities(blockdata)
+            if inline_table:
+                assert P_entity_indices == (L.LiteralInt(0),)
+                assert table.shape[0] == 1
+
+            # Unroll loop
+            blockshape = [len(DM) for DM in blockmap]
+            blockrange = [range(d) for d in blockshape]
+
+            for ii in itertools.product(*blockrange):
+                A_ii = sum(A_strides[i] * blockmap[i][ii[i]]
+                           for i in range(len(ii)))
+                if blockdata.transposed:
+                    P_arg_indices = (ii[1], ii[0])
+                else:
+                    P_arg_indices = ii
+
+                if inline_table:
+                    # Extract float value of PI[P_ii]
+                    Pval = table[0]  # always entity 0
+                    for i in P_arg_indices:
+                        Pval = Pval[i]
+                    A_rhs = Pval * f
+                else:
+                    # Index the static preintegrated table:
+                    P_ii = P_entity_indices + P_arg_indices
+                    A_rhs = f * PI[P_ii]
+
+                A_values[A_ii] = A_values[A_ii] + A_rhs
+
+        return self.generate_tensor_value_initialization(A_values)
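
The stride computation above is plain row-major flattening; the same arithmetic
in isolation:

    A_shape = (3, 4, 5)
    A_strides = [1] * len(A_shape)
    for i in reversed(range(len(A_shape) - 1)):
        A_strides[i] = A_strides[i + 1] * A_shape[i + 1]
    assert A_strides == [20, 5, 1]   # flat index of (i, j, k) is i*20 + j*5 + k
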
+
+
+    def generate_tensor_value_initialization(self, A_values):
+        parts = []
+
+        L = self.backend.language
+        A = self.backend.symbols.element_tensor()
+        A_size = len(A_values)
+
+        init_mode = self.ir["params"]["tensor_init_mode"]
+        z = L.LiteralFloat(0.0)
+
+        if init_mode == "direct":
+            # Generate A[i] = A_values[i] including zeros
+            for i in range(A_size):
+                parts += [L.Assign(A[i], A_values[i])]
+        elif init_mode == "upfront":
+            # Zero everything first
+            parts += [L.MemZero(A, A_size)]
+
+            # Generate A[i] = A_values[i] skipping zeros
+            for i in range(A_size):
+                if not (A_values[i] == 0.0 or A_values[i] == z):
+                    parts += [L.Assign(A[i], A_values[i])]
+        elif init_mode == "interleaved":
+            # Generate A[i] = A_values[i] with interleaved zero filling
+            i = 0
+            zero_begin = 0
+            zero_end = zero_begin
+            while i < A_size:
+                if A_values[i] == 0.0 or A_values[i] == z:
+                    # Update range of A zeros
+                    zero_end = i + 1
+                else:
+                    # Set zeros of A just prior to A[i]
+                    if zero_end == zero_begin + 1:
+                        parts += [L.Assign(A[zero_begin], 0.0)]
+                    elif zero_end > zero_begin:
+                        parts += [L.MemZeroRange(A, zero_begin, zero_end)]
+                    zero_begin = i + 1
+                    zero_end = zero_begin
+                    # Set A[i] value
+                    parts += [L.Assign(A[i], A_values[i])]
+                i += 1
+            if zero_end == zero_begin + 1:
+                parts += [L.Assign(A[zero_begin], 0.0)]
+            elif zero_end > zero_begin:
+                parts += [L.MemZeroRange(A, zero_begin, zero_end)]
+        else:
+            error("Invalid init_mode parameter %s" % (init_mode,))
+
+        return parts
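
As a rough illustration of the three modes, for A_values = [0.0, 0.0, 7.0, 0.0]
the generated statements would look approximately like:

    direct:       A[0] = 0.0; A[1] = 0.0; A[2] = 7.0; A[3] = 0.0;
    upfront:      std::fill(A, A + 4, 0.0); A[2] = 7.0;
    interleaved:  std::fill(&A[0], &A[2], 0.0); A[2] = 7.0; A[3] = 0.0;
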
+
+
+    def generate_expr_copyout_statements(self):
+        L = self.backend.language
+        parts = []
+
+        # Not expecting any quadrature loop scopes here
+        assert tuple(self.scopes.keys()) == (None,)
+
+        # TODO: Get symbol from backend
+        values = L.Symbol("values")
+
+        # TODO: Allow expression compilation to compute multiple points at once!
+        # Similarities to custom integrals in that points are given,
+        # while different in output format: results are not accumulated
+        # for each point but stored in output array instead.
+
+        # Assign computed results to output variables
+        pir = self.ir["piecewise_ir"]
+        V = pir["V"]
+        V_targets = pir["V_targets"]
+        for i, fi in enumerate(V_targets):
+            parts.append(L.Assign(values[i], self.get_var(None, V[fi])))
+
+        return parts
+
+
+    def generate_tensor_copyout_statements(self):
+        L = self.backend.language
+        parts = []
+
+        # Get symbol, dimensions, and loop index symbols for A
+        A_shape = self.ir["tensor_shape"]
+        A_size = product(A_shape)
+        A_rank = len(A_shape)
+
+        A_strides = [1]*A_rank  # TODO: there's something like shape2strides(A_shape) somewhere
+        for i in reversed(range(0, A_rank-1)):
+            A_strides[i] = A_strides[i+1] * A_shape[i+1]
+
+        Asym = self.backend.symbols.element_tensor()
+        A = L.FlattenedArray(Asym, dims=A_shape)
+
+        indices = [self.backend.symbols.argument_loop_index(i)
+                   for i in range(A_rank)]
+
+        dofmap_parts = []
+        dofmaps = {}
+        for blockmap, contributions in sorted(self.finalization_blocks.items()):
+
+            # Define mapping from B indices to A indices
+            A_indices = []
+            for i in range(A_rank):
+                dofmap = blockmap[i]
+                begin = dofmap[0]
+                end = dofmap[-1] + 1
+                if len(dofmap) == end - begin:
+                    # Dense insertion, offset B index to index A
+                    j = indices[i] + begin
+                else:
+                    # Sparse insertion, map B index through dofmap
+                    DM = dofmaps.get(dofmap)
+                    if DM is None:
+                        DM = L.Symbol("DM%d" % len(dofmaps))
+                        dofmaps[dofmap] = DM
+                        dofmap_parts.append(L.ArrayDecl("static const int", DM, len(dofmap), dofmap))
+                    j = DM[indices[i]]
+                A_indices.append(j)
+            A_indices = tuple(A_indices)
+
+            # Sum up all blocks contributing to this blockmap
+            term = L.Sum([B_rhs for B_rhs in contributions])
+
+            # TODO: need ttypes associated with this block to deal
+            # with loop dropping for quadrature elements:
+            ttypes = ()
+            if ttypes == ("quadrature", "quadrature"):
+                debug("quadrature element block insertion not optimized")
+
+            # Add components of all B's to A component in loop nest
+            body = L.AssignAdd(A[A_indices], term)
+            for i in reversed(range(A_rank)):
+                body = L.ForRange(indices[i], 0, len(blockmap[i]), body=body)
+
+            # Add this block to parts
+            parts.append(body)
+
+        # Place static dofmap tables first
+        parts = dofmap_parts + parts
+
+        return parts
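
For intuition, with hypothetical blockmaps: a contiguous blockmap such as
(3, 4, 5) takes the dense branch and indexes A[i0 + 3] directly, while a gapped
one such as (3, 5, 9) goes through a static lookup table, rendering roughly as:

    static const int DM0[3] = { 3, 5, 9 };
    for (int i0 = 0; i0 < 3; ++i0)
        A[DM0[i0]] += term;
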
+
+
+    def generate_copyout_statements(self):
+        """Generate statements copying results to output array."""
+        if self.ir["integral_type"] == "expression":
+            return self.generate_expr_copyout_statements()
+        #elif self.ir["unroll_copyout_loops"]:
+        #    return self.generate_unrolled_tensor_copyout_statements()
+        else:
+            return self.generate_tensor_copyout_statements()
diff --git a/ffc/uflacs/language/cnodes.py b/ffc/uflacs/language/cnodes.py
index 3dd83f7..7bc62c8 100644
--- a/ffc/uflacs/language/cnodes.py
+++ b/ffc/uflacs/language/cnodes.py
@@ -17,17 +17,18 @@
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
 
 from __future__ import print_function  # used in some debugging
+
 from six import string_types
 import numpy
+import numbers
 
-from ffc.uflacs.language.format_value import format_value, format_float
+from ffc.uflacs.language.format_value import format_value, format_float, format_int
 from ffc.uflacs.language.format_lines import format_indented_lines, Indented
 from ffc.uflacs.language.precedence import PRECEDENCE
 
 
 """CNode TODO:
 - Array copy statement
-- Memzero statement
 - Extend ArrayDecl and ArrayAccess with support for
   flattened but conceptually multidimensional arrays,
   maybe even with padding (FlattenedArray possibly covers what we need)
@@ -42,39 +43,91 @@ from ffc.uflacs.language.precedence import PRECEDENCE
 
 ############## Some helper functions
 
-def assign_loop(src, dst, ranges):
-    """Generate a nested loop over a list of ranges, assigning dst to src in the innermost loop.
+def assign_loop(dst, src, ranges):
+    """Generate a nested loop over a list of ranges, assigning src to dst in the innermost loop.
 
     Ranges is a list of the form [(index, begin, end), ...].
     """
-    code = Assign(src, dst)
+    code = Assign(dst, src)
     for i, b, e in reversed(ranges):
         code = ForRange(i, b, e, code)
     return code
 
 
-def accumulate_loop(src, dst, ranges):
-    """Generate a nested loop over a list of ranges, adding dst to src in the innermost loop.
+def accumulate_loop(dst, src, ranges):
+    """Generate a nested loop over a list of ranges, adding src to dst in the innermost loop.
 
     Ranges is a list of the form [(index, begin, end), ...].
     """
-    code = AssignAdd(src, dst)
+    code = AssignAdd(dst, src)
     for i, b, e in reversed(ranges):
         code = ForRange(i, b, e, code)
     return code
 
 
-def scale_loop(src, dst, ranges):
-    """Generate a nested loop over a list of ranges, multiplying dst with src in the innermost loop.
+def scale_loop(dst, factor, ranges):
+    """Generate a nested loop over a list of ranges, multiplying dst with factor in the innermost loop.
 
     Ranges is a list of the form [(index, begin, end), ...].
     """
-    code = AssignMul(src, dst)
+    code = AssignMul(dst, factor)
     for i, b, e in reversed(ranges):
         code = ForRange(i, b, e, code)
     return code
 
 
+def is_zero_cexpr(cexpr):
+    return (
+        (isinstance(cexpr, LiteralFloat) and cexpr.value == 0.0)
+        or (isinstance(cexpr, LiteralInt) and cexpr.value == 0)
+        )
+
+
+def is_one_cexpr(cexpr):
+    return (
+        (isinstance(cexpr, LiteralFloat) and cexpr.value == 1.0)
+        or (isinstance(cexpr, LiteralInt) and cexpr.value == 1)
+        )
+
+
+def is_negative_one_cexpr(cexpr):
+    return (
+        (isinstance(cexpr, LiteralFloat) and cexpr.value == -1.0)
+        or (isinstance(cexpr, LiteralInt) and cexpr.value == -1)
+        )
+
+
+def float_product(factors):
+    "Build product of float factors, simplifying ones and zeros and returning 1.0 if empty sequence."
+    factors = [f for f in factors if not is_one_cexpr(f)]
+    if len(factors) == 0:
+        return LiteralFloat(1.0)
+    elif len(factors) == 1:
+        return factors[0]
+    else:
+        for f in factors:
+            if is_zero_cexpr(f):
+                return f
+        return Product(factors)
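
A few expected simplifications, assuming the classes defined in this module:

    x = Symbol("x")
    assert float_product([]) == LiteralFloat(1.0)      # empty product is 1.0
    assert float_product([LiteralFloat(1.0), x]) is x  # ones are dropped
    assert is_zero_cexpr(float_product([x, LiteralFloat(0.0)]))  # zero shortcuts
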
+
+
+def MemZeroRange(name, begin, end):
+    name = as_cexpr(name)
+    return Call("std::fill", (
+        AddressOf(name[begin]),
+        AddressOf(name[end]),
+        LiteralFloat(0.0)))
+
+
+def MemZero(name, size):
+    name = as_cexpr(name)
+    size = as_cexpr(size)
+    return Call("std::fill", (
+        name,
+        name + size,
+        LiteralFloat(0.0)))
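
Both helpers render as std::fill calls; for example (via Statement, which
appends the semicolon), roughly:

    Statement(MemZero("A", 6)).cs_format()          # "std::fill(A, A + 6, 0.0);"
    Statement(MemZeroRange("A", 2, 5)).cs_format()  # "std::fill(&A[2], &A[5], 0.0);"
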
+
+
 ############## CNode core
 
 class CNode(object):
@@ -85,6 +138,14 @@ class CNode(object):
         name = self.__class__.__name__
         raise NotImplementedError("Missing implementation of __str__ in " + name)
 
+    def __eq__(self, other):
+        name = self.__class__.__name__
+        raise NotImplementedError("Missing implementation of __eq__ in " + name)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
 CNode.debug = False
 
 
@@ -97,7 +158,7 @@ class CExpr(CNode):
     """
     __slots__ = ()
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         raise NotImplementedError("Missing implementation of ce_format() in CExpr.")
 
     def __str__(self):
@@ -114,37 +175,95 @@ class CExpr(CNode):
         return ArrayAccess(self, indices)
 
     def __neg__(self):
+        if isinstance(self, LiteralFloat):
+            return LiteralFloat(-self.value)
+        if isinstance(self, LiteralInt):
+            return LiteralInt(-self.value)
         return Neg(self)
 
     def __add__(self, other):
+        other = as_cexpr(other)
+        if is_zero_cexpr(self):
+            return other
+        if is_zero_cexpr(other):
+            return self
+        if isinstance(other, Neg):
+            return Sub(self, other.arg)
         return Add(self, other)
 
     def __radd__(self, other):
+        other = as_cexpr(other)
+        if is_zero_cexpr(self):
+            return other
+        if is_zero_cexpr(other):
+            return self
+        if isinstance(self, Neg):
+            return Sub(other, self.arg)
         return Add(other, self)
 
     def __sub__(self, other):
+        other = as_cexpr(other)
+        if is_zero_cexpr(self):
+            return -other
+        if is_zero_cexpr(other):
+            return self
+        if isinstance(other, Neg):
+            return Add(self, other.arg)
         return Sub(self, other)
 
     def __rsub__(self, other):
+        other = as_cexpr(other)
+        if is_zero_cexpr(self):
+            return other
+        if is_zero_cexpr(other):
+            return -self
+        if isinstance(self, Neg):
+            return Add(other, self.arg)
         return Sub(other, self)
 
     def __mul__(self, other):
+        other = as_cexpr(other)
+        if is_zero_cexpr(self):
+            return self
+        if is_zero_cexpr(other):
+            return other
+        if is_negative_one_cexpr(other):
+            return Neg(self)
+        if is_negative_one_cexpr(self):
+            return Neg(other)
         return Mul(self, other)
 
     def __rmul__(self, other):
+        other = as_cexpr(other)
+        if is_zero_cexpr(self):
+            return self
+        if is_zero_cexpr(other):
+            return other
+        if is_negative_one_cexpr(other):
+            return Neg(self)
+        if is_negative_one_cexpr(self):
+            return Neg(other)
         return Mul(other, self)
 
     def __div__(self, other):
+        other = as_cexpr(other)
+        if is_zero_cexpr(other):
+            raise ValueError("Division by zero!")
+        if is_zero_cexpr(self):
+            return self
         return Div(self, other)
 
     def __rdiv__(self, other):
+        other = as_cexpr(other)
+        if is_zero_cexpr(self):
+            raise ValueError("Division by zero!")
+        if is_zero_cexpr(other):
+            return other
         return Div(other, self)
 
-    def __truediv__(self, other):
-        return Div(self, other)
+    __truediv__ = __div__
 
-    def __rtruediv__(self, other):
-        return Div(other, self)
+    __rtruediv__ = __rdiv__
 
     def __floordiv__(self, other):
         return NotImplemented
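
The zero/one shortcuts above mean Python-level arithmetic on CExpr nodes folds
trivial operands; a few expected results:

    x, y = Symbol("x"), Symbol("y")
    zero = LiteralFloat(0.0)
    assert (x + zero) is x                     # x + 0   ->  x
    assert is_zero_cexpr(zero * y)             # 0 * y   ->  0
    assert (x - (-y)).ce_format() == "x + y"   # x - (-y) ->  x + y
    assert (-1 * x).ce_format() == "-x"        # -1 * x  ->  -x
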
@@ -155,29 +274,30 @@ class CExpr(CNode):
 
 class CExprOperator(CExpr):
     """Base class for all C expression operator."""
-    __slots__ = ("children",)
+    __slots__ = ()
     sideeffect = False
 
 
 class CExprTerminal(CExpr):
     """Base class for all C expression terminals."""
     __slots__ = ()
-    children = ()
+    sideeffect = False
 
 
 ############## CExprTerminal types
 
 class CExprLiteral(CExprTerminal):
     "A float or int literal value."
-    __slots__ = ("value",)
+    __slots__ = ()
     precedence = PRECEDENCE.LITERAL
 
+
 class Null(CExprLiteral):
     "A null pointer literal."
-    __slots__ = ("value",)
+    __slots__ = ()
     precedence = PRECEDENCE.LITERAL
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         # C or old C++ version
         #return "NULL"
         # C++11 version
@@ -186,6 +306,7 @@ class Null(CExprLiteral):
     def __eq__(self, other):
         return isinstance(other, Null)
 
+
 class LiteralFloat(CExprLiteral):
     "A floating point literal value."
     __slots__ = ("value",)
@@ -195,18 +316,21 @@ class LiteralFloat(CExprLiteral):
         assert isinstance(value, (float, int, numpy.number))
         self.value = value
 
-    def ce_format(self):
-        return format_float(self.value)
+    def ce_format(self, precision=None):
+        return format_float(self.value, precision)
 
     def __eq__(self, other):
         return isinstance(other, LiteralFloat) and self.value == other.value
 
-    def __nonzero__(self):
+    def __bool__(self):
         return bool(self.value)
 
+    __nonzero__ = __bool__
+
     def __float__(self):
         return float(self.value)
 
+
 class LiteralInt(CExprLiteral):
     "An integer literal value."
     __slots__ = ("value",)
@@ -216,18 +340,24 @@ class LiteralInt(CExprLiteral):
         assert isinstance(value, (int, numpy.number))
         self.value = value
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         return str(self.value)
 
     def __eq__(self, other):
         return isinstance(other, LiteralInt) and self.value == other.value
 
-    def __nonzero__(self):
+    def __bool__(self):
         return bool(self.value)
 
+    __nonzero__ = __bool__
+
     def __int__(self):
         return int(self.value)
 
+    def __float__(self):
+        return float(self.value)
+
+
 class LiteralBool(CExprLiteral):
     "A boolean literal value."
     __slots__ = ("value",)
@@ -237,18 +367,18 @@ class LiteralBool(CExprLiteral):
         assert isinstance(value, (bool,))
         self.value = value
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         return "true" if self.value else "false"
 
     def __eq__(self, other):
         return isinstance(other, LiteralBool) and self.value == other.value
 
-    def __nonzero__(self):
-        return bool(self.value)
-
     def __bool__(self):
         return bool(self.value)
 
+    __nonzero__ = __bool__
+
+
 class LiteralString(CExprLiteral):
     "A boolean literal value."
     __slots__ = ("value",)
@@ -259,7 +389,7 @@ class LiteralString(CExprLiteral):
         assert '"' not in value
         self.value = value
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         return '"%s"' % (self.value,)
 
     def __eq__(self, other):
@@ -275,9 +405,13 @@ class Symbol(CExprTerminal):
         assert isinstance(name, string_types)
         self.name = name
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         return self.name
 
+    def __eq__(self, other):
+        return isinstance(other, Symbol) and self.name == other.name
+
+
 class VerbatimExpr(CExprTerminal):
     """A verbatim copy of an expression source string.
 
@@ -289,18 +423,25 @@ class VerbatimExpr(CExprTerminal):
         assert isinstance(codestring, string_types)
         self.codestring = codestring
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         return self.codestring
 
+    def __eq__(self, other):
+        return isinstance(other, VerbatimExpr) and self.codestring == other.codestring
+
+
 class New(CExpr):
     __slots__ = ("typename",)
     def __init__(self, typename):
         assert isinstance(typename, string_types)
         self.typename = typename
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         return "new %s()" % (self.typename,)
 
+    def __eq__(self, other):
+        return isinstance(other, New) and self.typename == other.typename
+
 
 ############## CExprOperator base classes
 
@@ -310,34 +451,46 @@ class UnaryOp(CExprOperator):
     def __init__(self, arg):
         self.arg = as_cexpr(arg)
 
+    def __eq__(self, other):
+        return isinstance(other, type(self)) and self.arg == other.arg
+
+
 class PrefixUnaryOp(UnaryOp):
     "Base class for prefix unary operators."
     __slots__ = ()
-    def ce_format(self):
-        arg = self.arg.ce_format()
+    def ce_format(self, precision=None):
+        arg = self.arg.ce_format(precision)
         if self.arg.precedence >= self.precedence:
             arg = '(' + arg + ')'
         return self.op + arg
 
+    def __eq__(self, other):
+        return isinstance(other, type(self)) and self.arg == other.arg
+
+
 class PostfixUnaryOp(UnaryOp):
     "Base class for postfix unary operators."
     __slots__ = ()
-    def ce_format(self):
-        arg = self.arg.ce_format()
+    def ce_format(self, precision=None):
+        arg = self.arg.ce_format(precision)
         if self.arg.precedence >= self.precedence:
             arg = '(' + arg + ')'
         return arg + self.op
 
+    def __eq__(self, other):
+        return isinstance(other, type(self)) and self.arg == other.arg
+
+
 class BinOp(CExprOperator):
     __slots__ = ("lhs", "rhs")
     def __init__(self, lhs, rhs):
         self.lhs = as_cexpr(lhs)
         self.rhs = as_cexpr(rhs)
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         # Format children
-        lhs = self.lhs.ce_format()
-        rhs = self.rhs.ce_format()
+        lhs = self.lhs.ce_format(precision)
+        rhs = self.rhs.ce_format(precision)
 
         # Apply parentheses
         if self.lhs.precedence > self.precedence:
@@ -348,15 +501,21 @@ class BinOp(CExprOperator):
         # Return combined string
         return lhs + (" " + self.op + " ") + rhs
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.lhs == other.lhs
+                    and self.rhs == other.rhs)
+
+
 class NaryOp(CExprOperator):
     "Base class for special n-ary operators."
     __slots__ = ("args",)
     def __init__(self, args):
         self.args = [as_cexpr(arg) for arg in args]
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         # Format children
-        args = [arg.ce_format() for arg in self.args]
+        args = [arg.ce_format(precision) for arg in self.args]
 
         # Apply parentheses
         for i in range(len(args)):
@@ -370,6 +529,12 @@ class NaryOp(CExprOperator):
             s += op + args[i]
         return s
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and len(self.args) == len(other.args)
+                    and all(a == b for a, b in zip(self.args, other.args)))
+
+
 ############## CExpr unary operators
 
 class Dereference(PrefixUnaryOp):
@@ -628,15 +793,20 @@ class FlattenedArray(object):
         if not isinstance(indices, (list,tuple)):
             indices = (indices,)
         n = len(indices)
-        i, s = (indices[0], self.strides[0])
-        literal_one = LiteralInt(1)
-
-        flat = (i if s == literal_one else s * i)
-        if self.offset is not None:
-            flat = self.offset + flat
-        for i, s in zip(indices[1:n], self.strides[1:n]):
-            flat = flat + (i if s == literal_one else s * i)
-
+        if n == 0:
+            # Handle scalar case, allowing dims=() and indices=() for A[0]
+            if len(self.strides) != 0:
+                raise ValueError("Empty indices for nonscalar array.")
+            flat = LiteralInt(0)
+        else:
+            i, s = (indices[0], self.strides[0])
+            literal_one = LiteralInt(1)
+            flat = (i if s == literal_one else s * i)
+            if self.offset is not None:
+                flat = self.offset + flat
+            for i, s in zip(indices[1:n], self.strides[1:n]):
+                flat = flat + (i if s == literal_one else s * i)
+        # Delay applying ArrayAccess until we have all indices
         if n == len(self.strides):
             return ArrayAccess(self.array, flat)
         else:
@@ -684,12 +854,18 @@ class ArrayAccess(CExprOperator):
             indices = (indices,)
         return ArrayAccess(self.array, self.indices + indices)
 
-    def ce_format(self):
-        s = self.array.ce_format()
+    def ce_format(self, precision=None):
+        s = self.array.ce_format(precision)
         for index in self.indices:
-            s += "[" + index.ce_format() + "]"
+            s += "[" + index.ce_format(precision) + "]"
         return s
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.array == other.array
+                    and self.indices == other.indices)
+
+
 class Conditional(CExprOperator):
     __slots__ = ("condition", "true", "false")
     precedence = PRECEDENCE.CONDITIONAL
@@ -699,11 +875,11 @@ class Conditional(CExprOperator):
         self.true = as_cexpr(true)
         self.false = as_cexpr(false)
 
-    def ce_format(self):
+    def ce_format(self, precision=None):
         # Format children
-        c = self.condition.ce_format()
-        t = self.true.ce_format()
-        f = self.false.ce_format()
+        c = self.condition.ce_format(precision)
+        t = self.true.ce_format(precision)
+        f = self.false.ce_format(precision)
 
         # Apply parentheses
         if self.condition.precedence >= self.precedence:
@@ -716,6 +892,12 @@ class Conditional(CExprOperator):
         # Return combined string
         return c + " ? " + t + " : " + f
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.condition == other.condition
+                    and self.true == other.true
+                    and self.false == other.false)
+
 
 class Call(CExprOperator):
     __slots__ = ("function", "arguments")
@@ -733,19 +915,20 @@ class Call(CExprOperator):
             arguments = (arguments,)
         self.arguments = [as_cexpr(arg) for arg in arguments]
 
-    def ce_format(self):
-        args = ", ".join(arg.ce_format() for arg in self.arguments)
-        return self.function.ce_format() + "(" + args + ")"
-
+    def ce_format(self, precision=None):
+        args = ", ".join(arg.ce_format(precision) for arg in self.arguments)
+        return self.function.ce_format(precision) + "(" + args + ")"
 
-############## Convertion function to expression nodes
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.function == other.function
+                    and self.arguments == other.arguments)
 
-number_types = (int, float, complex, numpy.number)
 
+############## Conversion functions to expression nodes
 
-def _is_zero(values):
-    global number_types
-    if isinstance(values, number_types + (LiteralFloat, LiteralInt)):
+def _is_zero_valued(values):
+    if isinstance(values, (numbers.Number, LiteralFloat, LiteralInt)):
         return float(values) == 0.0
     else:
         return numpy.count_nonzero(values) == 0
@@ -756,7 +939,6 @@ def as_cexpr(node):
 
     Accepts CExpr nodes, treats int and float as literals, and treats a string as a symbol.
     """
-    global number_types
     if isinstance(node, CExpr):
         return node
     elif isinstance(node, (int, numpy.integer)):
@@ -810,9 +992,9 @@ class CStatement(CNode):
 
     Subtypes do _not_ define a 'precedence' class attribute.
     """
-    __slots__ = ("children",)
+    __slots__ = ()
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         "Return S: string | list(S) | Indented(S)."
         raise NotImplementedError("Missing implementation of cs_format() in CStatement.")
 
@@ -836,9 +1018,13 @@ class VerbatimStatement(CStatement):
         assert isinstance(codestring, string_types)
         self.codestring = codestring
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         return self.codestring
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.codestring == other.codestring)
+
 
 class Statement(CStatement):
     "Make an expression into a statement."
@@ -846,8 +1032,12 @@ class Statement(CStatement):
     def __init__(self, expr):
         self.expr = as_cexpr(expr)
 
-    def cs_format(self):
-        return self.expr.ce_format() + ";"
+    def cs_format(self, precision=None):
+        return self.expr.ce_format(precision) + ";"
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.expr == other.expr)
 
 
 class StatementList(CStatement):
@@ -856,8 +1046,12 @@ class StatementList(CStatement):
     def __init__(self, statements):
         self.statements = [as_cstatement(st) for st in statements]
 
-    def cs_format(self):
-        return [st.cs_format() for st in self.statements]
+    def cs_format(self, precision=None):
+        return [st.cs_format(precision) for st in self.statements]
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.statements == other.statements)
 
 
 ############## Simple statements
@@ -868,29 +1062,43 @@ class Using(CStatement):
         assert isinstance(name, string_types)
         self.name = name
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         return "using " + self.name + ";"
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.name == other.name)
+
 
 class Break(CStatement):
     __slots__ = ()
-    def cs_format(self):
+    def cs_format(self, precision=None):
         return "break;"
 
+    def __eq__(self, other):
+        return isinstance(other, type(self))
+
 
 class Continue(CStatement):
     __slots__ = ()
-    def cs_format(self):
+    def cs_format(self, precision=None):
         return "continue;"
 
+    def __eq__(self, other):
+        return isinstance(other, type(self))
+
 
 class Return(CStatement):
     __slots__ = ("value",)
     def __init__(self, value):
         self.value = as_cexpr(value)
 
-    def cs_format(self):
-        return "return " + self.value.ce_format() + ";"
+    def cs_format(self, precision=None):
+        return "return " + self.value.ce_format(precision) + ";"
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.value == other.value)
 
 
 class Case(CStatement):
@@ -899,15 +1107,22 @@ class Case(CStatement):
         # NB! This is too permissive and will allow invalid case arguments.
         self.value = as_cexpr(value)
 
-    def cs_format(self):
-        return "case " + self.value.ce_format() + ":"
+    def cs_format(self, precision=None):
+        return "case " + self.value.ce_format(precision) + ":"
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.value == other.value)
 
 
 class Default(CStatement):
     __slots__ = ()
-    def cs_format(self):
+    def cs_format(self, precision=None):
         return "default:"
 
+    def __eq__(self, other):
+        return isinstance(other, type(self))
+
 
 class Throw(CStatement):
     __slots__ = ("exception", "message")
@@ -917,10 +1132,15 @@ class Throw(CStatement):
         self.exception = exception
         self.message = message
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         assert '"' not in self.message
         return "throw " + self.exception + '("' + self.message + '");'
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.message == other.message
+                    and self.exception == other.exception)
+
 
 class Comment(CStatement):
     "Line comment(s) used for annotating the generated code with human readable remarks."
@@ -929,10 +1149,14 @@ class Comment(CStatement):
         assert isinstance(comment, string_types)
         self.comment = comment
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         lines = self.comment.strip().split("\n")
         return ["// " + line.strip() for line in lines]
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.comment == other.comment)
+
 
 def commented_code_list(code, comments):
     "Convenience wrapper for adding comment to code list if the list is not empty."
@@ -954,10 +1178,14 @@ class Pragma(CStatement):  # TODO: Improve on this with a use case later
         assert isinstance(comment, string_types)
         self.comment = comment
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         assert "\n" not in self.comment
         return "#pragma " + self.comment
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.comment == other.comment)
+
 
 ############## Type and variable declarations
 
@@ -977,31 +1205,54 @@ class VariableDecl(CStatement):
             value = as_cexpr(value)
         self.value = value
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         code = self.typename + " " + self.symbol.name
         if self.value is not None:
-            code += " = " + self.value.ce_format()
+            code += " = " + self.value.ce_format(precision)
         return code + ";"
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.typename == other.typename
+                    and self.symbol == other.symbol
+                    and self.value == other.value)
+
 
 def leftover(size, padlen):
     "Return minimum integer to add to size to make it divisible by padlen."
     return (padlen - (size % padlen)) % padlen
 
 
-def build_1d_initializer_list(values, formatter, padlen=0):
+def pad_dim(dim, padlen):
+    "Make dim divisible by padlen."
+    return ((dim + padlen - 1) // padlen) * padlen
+
+
+def pad_innermost_dim(shape, padlen):
+    "Make the last dimension in shape divisible by padlen."
+    if not shape:
+        return ()
+    shape = list(shape)
+    if padlen:
+        shape[-1] = pad_dim(shape[-1], padlen)
+    return tuple(shape)
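
Quick sanity examples for the padding helpers:

    assert leftover(7, 4) == 1
    assert pad_dim(7, 4) == 8
    assert pad_innermost_dim((3, 7), 4) == (3, 8)
    assert pad_innermost_dim((), 4) == ()
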
+
+
+def build_1d_initializer_list(values, formatter, padlen=0, precision=None):
     '''Return a list containing a single line formatted like "{ 0.0, 1.0, 2.0 }"'''
+    if formatter == str:
+        formatter = lambda x, p: str(x)
     tokens = ["{ "]
     if numpy.product(values.shape) > 0:
         sep = ", "
-        fvalues = [formatter(v) for v in values]
+        fvalues = [formatter(v, precision) for v in values]
         for v in fvalues[:-1]:
             tokens.append(v)
             tokens.append(sep)
         tokens.append(fvalues[-1])
         if padlen:
             # Add padding
-            zero = formatter(values.dtype(0))
+            zero = formatter(values.dtype.type(0), precision)
             for i in range(leftover(len(values), padlen)):
                 tokens.append(sep)
                 tokens.append(zero)
@@ -1009,7 +1260,7 @@ def build_1d_initializer_list(values, formatter, padlen=0):
     return "".join(tokens)
 
 
-def build_initializer_lists(values, sizes, level, formatter, padlen=0):
+def build_initializer_lists(values, sizes, level, formatter, padlen=0, precision=None):
     """Return a list of lines with initializer lists for a multidimensional array.
 
     Example output::
@@ -1017,6 +1268,8 @@ def build_initializer_lists(values, sizes, level, formatter, padlen=0):
         { { 0.0, 0.1 },
           { 1.0, 1.1 } }
     """
+    if formatter == str:
+        formatter = lambda x, p: str(x)
     values = numpy.asarray(values)
     assert numpy.product(values.shape) == numpy.product(sizes)
     assert len(sizes) > 0
@@ -1027,12 +1280,12 @@ def build_initializer_lists(values, sizes, level, formatter, padlen=0):
     r = len(sizes)
     assert r > 0
     if r == 1:
-        return [build_1d_initializer_list(values, formatter, padlen=padlen)]
+        return [build_1d_initializer_list(values, formatter, padlen=padlen, precision=precision)]
     else:
         # Render all sublists
         parts = []
         for val in values:
-            sublist = build_initializer_lists(val, sizes[1:], level+1, formatter, padlen=padlen)
+            sublist = build_initializer_lists(val, sizes[1:], level+1, formatter, padlen=padlen, precision=precision)
             parts.append(sublist)
         # Add comma after last line in each part except the last one
         for part in parts[:-1]:
@@ -1086,11 +1339,9 @@ class ArrayDecl(CStatement):
         """
         return ArrayAccess(self, indices)
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         # Pad innermost array dimension
-        sizes = list(self.sizes)
-        if self.padlen:
-            sizes[-1] += leftover(sizes[-1], self.padlen)
+        sizes = pad_innermost_dim(self.sizes, self.padlen)
 
         # Add brackets
         brackets = ''.join("[%d]" % n for n in sizes)
@@ -1107,7 +1358,7 @@ class ArrayDecl(CStatement):
         if self.values is None:
             # Undefined initial values
             return decl + ";"
-        elif _is_zero(self.values):
+        elif _is_zero_valued(self.values):
             # Zero initial values
             # (NB! C++ style zero initialization, not sure about other target languages)
             return decl + " = {};"
@@ -1115,17 +1366,26 @@ class ArrayDecl(CStatement):
             # Construct initializer lists for arbitrary multidimensional array values
             if self.values.dtype.kind == "f":
                 formatter = format_float
+            elif self.values.dtype.kind == "i":
+                formatter = format_int
             else:
-                # Not really using other types, this can be buggy
                 formatter = format_value
             initializer_lists = build_initializer_lists(self.values, self.sizes, 0,
-                                                        formatter, padlen=self.padlen)
+                                                        formatter, padlen=self.padlen,
+                                                        precision=precision)
             if len(initializer_lists) == 1:
                 return decl + " = " + initializer_lists[0] + ";"
             else:
                 initializer_lists[-1] += ";" # Close statement on final line
                 return (decl + " =", Indented(initializer_lists))
 
+    def __eq__(self, other):
+        attributes = ("typename", "symbol", "sizes", "alignas", "padlen", "values")
+        return (isinstance(other, type(self))
+                    and all(getattr(self, name) == getattr(other, name)
+                            for name in attributes))
+
+
 
 ############## Scoped statements
 
@@ -1134,8 +1394,12 @@ class Scope(CStatement):
     def __init__(self, body):
         self.body = as_cstatement(body)
 
-    def cs_format(self):
-        return ("{", Indented(self.body.cs_format()), "}")
+    def cs_format(self, precision=None):
+        return ("{", Indented(self.body.cs_format(precision)), "}")
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.body == other.body)
 
 
 class Namespace(CStatement):
@@ -1145,9 +1409,14 @@ class Namespace(CStatement):
         self.name = name
         self.body = as_cstatement(body)
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         return ("namespace " + self.name,
-                "{", Indented(self.body.cs_format()), "}")
+                "{", Indented(self.body.cs_format(precision)), "}")
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.name == other.name
+                    and self.body == other.body)
 
 
 class If(CStatement):
@@ -1156,9 +1425,14 @@ class If(CStatement):
         self.condition = as_cexpr(condition)
         self.body = as_cstatement(body)
 
-    def cs_format(self):
-        return ("if (" + self.condition.ce_format() + ")",
-                "{", Indented(self.body.cs_format()), "}")
+    def cs_format(self, precision=None):
+        return ("if (" + self.condition.ce_format(precision) + ")",
+                "{", Indented(self.body.cs_format(precision)), "}")
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.condition == other.condition
+                    and self.body == other.body)
 
 
 class ElseIf(CStatement):
@@ -1167,9 +1441,14 @@ class ElseIf(CStatement):
         self.condition = as_cexpr(condition)
         self.body = as_cstatement(body)
 
-    def cs_format(self):
-        return ("else if (" + self.condition.ce_format() + ")",
-                "{", Indented(self.body.cs_format()), "}")
+    def cs_format(self, precision=None):
+        return ("else if (" + self.condition.ce_format(precision) + ")",
+                "{", Indented(self.body.cs_format(precision)), "}")
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.condition == other.condition
+                    and self.body == other.body)
 
 
 class Else(CStatement):
@@ -1177,9 +1456,13 @@ class Else(CStatement):
     def __init__(self, body):
         self.body = as_cstatement(body)
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         return ("else",
-                "{", Indented(self.body.cs_format()), "}")
+                "{", Indented(self.body.cs_format(precision)), "}")
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.body == other.body)
 
 
 class While(CStatement):
@@ -1188,9 +1471,14 @@ class While(CStatement):
         self.condition = as_cexpr(condition)
         self.body = as_cstatement(body)
 
-    def cs_format(self):
-        return ("while (" + self.condition.ce_format() + ")",
-                "{", Indented(self.body.cs_format()), "}")
+    def cs_format(self, precision=None):
+        return ("while (" + self.condition.ce_format(precision) + ")",
+                "{", Indented(self.body.cs_format(precision)), "}")
+
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.condition == other.condition
+                    and self.body == other.body)
 
 
 class Do(CStatement):
@@ -1199,31 +1487,70 @@ class Do(CStatement):
         self.condition = as_cexpr(condition)
         self.body = as_cstatement(body)
 
-    def cs_format(self):
-        return ("do", "{", Indented(self.body.cs_format()),
-                "} while (" + self.condition.ce_format() + ");")
+    def cs_format(self, precision=None):
+        return ("do", "{", Indented(self.body.cs_format(precision)),
+                "} while (" + self.condition.ce_format(precision) + ");")
 
+    def __eq__(self, other):
+        return (isinstance(other, type(self))
+                    and self.condition == other.condition
+                    and self.body == other.body)
+
+
+def as_pragma(pragma):
+    "Convert a string to a Pragma, pass a Pragma through, and return None otherwise."
+    if isinstance(pragma, string_types):
+        return Pragma(pragma)
+    elif isinstance(pragma, Pragma):
+        return pragma
+    return None
+
+
+def is_simple_inner_loop(code):
+    "Return True if code renders as a single statement, safe to emit without braces."
+    if isinstance(code, ForRange) and code.pragma is None:
+        return True
+    if isinstance(code, For) and code.pragma is None:
+        return True
+    if isinstance(code, Statement) and isinstance(code.expr, AssignOp):
+        return True
+    return False
+
 
 class For(CStatement):
-    __slots__ = ("init", "check", "update", "body")
-    def __init__(self, init, check, update, body):
+    __slots__ = ("init", "check", "update", "body", "pragma")
+    def __init__(self, init, check, update, body, pragma=None):
         self.init = as_cstatement(init)
         self.check = as_cexpr(check)
         self.update = as_cexpr(update)
         self.body = as_cstatement(body)
+        self.pragma = as_pragma(pragma)
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         # The C model here is a bit crude and this causes trouble
         # in the init statement/expression here:
-        init = self.init.cs_format()
+        init = self.init.cs_format(precision)
         assert isinstance(init, string_types)
-        assert init.rstrip().endswith(";")
+        init = init.rstrip(" ;")
 
-        check = self.check.ce_format()
-        update = self.update.ce_format()
-        body = self.body.cs_format()
-        return ("for (" + init + " " + check + "; " + update + ")",
-                "{", Indented(body), "}")
+        check = self.check.ce_format(precision)
+        update = self.update.ce_format(precision)
+
+        prelude = "for (" + init + "; " + check + "; " + update + ")"
+        body = Indented(self.body.cs_format(precision))
+
+        # Reduce size of code with lots of simple loops by dropping {} in obviously safe cases
+        if is_simple_inner_loop(self.body):
+            code = (prelude, body)
+        else:
+            code = (prelude, "{", body, "}")
+
+        # Add pragma prefix if requested
+        if self.pragma is not None:
+            code = (self.pragma.cs_format(),) + code
+
+        return code
+
+    def __eq__(self, other):
+        attributes = ("init", "check", "update", "body")
+        return (isinstance(other, type(self))
+                    and all(getattr(self, name) == getattr(other, name)
+                            for name in attributes))
 
 
 class Switch(CStatement):
@@ -1239,11 +1566,11 @@ class Switch(CStatement):
         self.autobreak = autobreak
         self.autoscope = autoscope
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         cases = []
         for case in self.cases:
-            caseheader = "case " + case[0].ce_format() + ":"
-            casebody = case[1].cs_format()
+            caseheader = "case " + case[0].ce_format(precision) + ":"
+            casebody = case[1].cs_format(precision)
             if self.autoscope:
                 casebody = ("{", Indented(casebody), "}")
             if self.autobreak:
@@ -1252,47 +1579,80 @@ class Switch(CStatement):
 
         if self.default is not None:
             caseheader = "default:"
-            casebody = self.default.cs_format()
+            casebody = self.default.cs_format(precision)
             if self.autoscope:
                 casebody = ("{", Indented(casebody), "}")
             cases.extend([caseheader, Indented(casebody)])
 
-        return ("switch (" + self.arg.ce_format() + ")",
+        return ("switch (" + self.arg.ce_format(precision) + ")",
                 "{", cases, "}")
 
+    def __eq__(self, other):
+        attributes = ("arg", "cases", "default", "autobreak", "autoscope")
+        return (isinstance(other, type(self))
+                    and all(getattr(self, name) == getattr(other, name)
+                            for name in attributes))
+
 
 class ForRange(CStatement):
     "Slightly higher-level for loop assuming incrementing an index over a range."
-    __slots__ = ("index", "begin", "end", "body", "index_type")
-    def __init__(self, index, begin, end, body):
+    __slots__ = ("index", "begin", "end", "body", "pragma", "index_type")
+    def __init__(self, index, begin, end, body, vectorize=None):
         self.index = as_cexpr(index)
         self.begin = as_cexpr(begin)
         self.end = as_cexpr(end)
         self.body = as_cstatement(body)
 
+        if vectorize:
+            pragma = Pragma("omp simd")
+        else:
+            pragma = None
+        self.pragma = pragma
+
         # Could be configured if needed but not sure how we're
         # going to handle type information right now:
         self.index_type = "int"
 
-    def cs_format(self):
+    def cs_format(self, precision=None):
         indextype = self.index_type
-        index = self.index.ce_format()
-        begin = self.begin.ce_format()
-        end = self.end.ce_format()
+        index = self.index.ce_format(precision)
+        begin = self.begin.ce_format(precision)
+        end = self.end.ce_format(precision)
 
         init = indextype + " " + index + " = " + begin
         check = index + " < " + end
         update = "++" + index
 
-        return ("for (" + init + "; " + check + "; " + update + ")",
-                "{", Indented(self.body.cs_format()), "}")
+        prelude = "for (" + init + "; " + check + "; " + update + ")"
+        body = Indented(self.body.cs_format(precision))
+
+        # Reduce size of code with lots of simple loops by dropping {} in obviously safe cases
+        if is_simple_inner_loop(self.body):
+            code = (prelude, body)
+        else:
+            code = (prelude, "{", body, "}")
+
+        # Add vectorization hint if requested
+        if self.pragma is not None:
+            code = (self.pragma.cs_format(),) + code
+
+        return code
+
+    def __eq__(self, other):
+        attributes = ("index", "begin", "end", "body", "pragma", "index_type")
+        return (isinstance(other, type(self))
+                    and all(getattr(self, name) == getattr(other, name)
+                            for name in attributes))
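
With vectorize=True the loop is prefixed by "#pragma omp simd", and a simple
body drops its braces; e.g. (hypothetical symbols, assuming Assign is the "="
AssignOp):

    A, i = Symbol("A"), Symbol("i")
    loop = ForRange(i, 0, 4, Assign(A[i], 0.0), vectorize=True)
    # loop.cs_format() yields lines equivalent to:
    #   #pragma omp simd
    #   for (int i = 0; i < 4; ++i)
    #     A[i] = 0.0;
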
 
 
 ############## Conversion functions to statement nodes
 
 def as_cstatement(node):
     "Perform type checking on node and wrap in a suitable statement type if necessary."
-    if isinstance(node, CStatement):
+    if isinstance(node, StatementList) and len(node.statements) == 1:
+        # Cleans up the expression tree a bit
+        return node.statements[0]
+    elif isinstance(node, CStatement):
         # No-op
         return node
     elif isinstance(node, CExprOperator):
@@ -1305,7 +1665,11 @@ def as_cstatement(node):
                 % (type(node), str(node)))
     elif isinstance(node, list):
         # Convenience case for list of statements
-        return StatementList(node)
+        if len(node) == 1:
+            # Cleans up the expression tree a bit
+            return as_cstatement(node[0])
+        else:
+            return StatementList(node)
     elif isinstance(node, string_types):
         # Backdoor for flexibility in code generation to allow verbatim pasted statements
         return VerbatimStatement(node)
diff --git a/ffc/uflacs/language/format_value.py b/ffc/uflacs/language/format_value.py
index fa38ce1..05092f9 100644
--- a/ffc/uflacs/language/format_value.py
+++ b/ffc/uflacs/language/format_value.py
@@ -17,77 +17,33 @@
 # along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
 
 from six import string_types
+import numbers
 import re
-import numpy
-#from ffc.log import info
-
-_float_threshold = None
-_float_precision = None
-_float_fmt = "%r"
-
-
-def set_float_precision(precision, threshold=None):
-    "Configure float formatting precision and zero threshold."
-    global _float_precision, _float_threshold, _float_fmt
-    if threshold is None:
-        threshold = 10.0**-(precision-1)  # Matching FFC behaviour, I'd like to drop the -1 here
-    _float_precision = precision
-    _float_threshold = threshold
-    #_float_fmt = "{{:.{0:d}e}}".format(_float_precision)
-    if _float_precision is None:
-        _float_fmt = "%r"
-    else:
-        _float_fmt = "%%.%dg" % _float_precision
-    #info("Setting float precision to %d in uflacs." % (precision,))
-
-
-def get_float_precision():
-    return _float_precision
-
-
-def get_float_threshold():
-    return _float_threshold
-
-
-def reset_float_precision():
-    "Set float precision and zero threshold back to default."
-    set_float_precision(15)
-
-
-# Execute default on startup
-reset_float_precision()
-
 
 _subs = (
-    # Remove trailing spaces (using .strip() for this)
-    #(re.compile(r"^[ ]*([^ ])[ ]*$"), "\1"),
-    # Remove 0s before e
-    (re.compile(r"0+e"), "e"),
     # Remove 0s after e+ or e-
-    (re.compile(r"e([\+\-])0+"), r"e\1"),
-    # Remove e+/e- at end (zero removal above can produce this)
-    (re.compile(r"e[\+\-]$"), ""),
-    # Replace lonely . at end with .0 for consistency
-    (re.compile("\\.$"), ".0"),
-    # Append .0 if it's an integer
-    (re.compile(r"^([\+\-]{,1}\d+)$"), r"\1.0"),
+    (re.compile(r"e[\+]0*(.)"), r"e\1"),
+    (re.compile(r"e[\-]0*(.)"), r"e-\1"),
     )
-def format_float(x):
-    "Format a float value according to set_float_precision."
-    global _float_threshold, _float_fmt, _subs
-
-    if _float_threshold is not None and abs(x) < _float_threshold:
-        return "0.0"
+def format_float(x, precision=None):
+    "Format a float value according to given precision."
+    global _subs
+    if precision:
+        s = "{:.{prec}}".format(float(x), prec=precision)
     else:
-        s = (_float_fmt % x).strip()
-        for r, v in _subs:
-            s = r.sub(v, s)
-        return s
+        # Using "{}".format(float(x)) apparently results
+        # in lower precision on Python 2 than on Python 3
+        s = repr(float(x))
+    for r, v in _subs:
+        s = r.sub(v, s)
+    return s
+
+
+def format_int(x, precision=None):
+    return str(x)
 
 
-_ints = (int, numpy.integer)
-_floats = (float, numpy.floating)
-def format_value(value):
+def format_value(value, precision=None):
     """Format a literal value as s tring.
 
     - float: Formatted according to current precision configuration.
@@ -97,12 +53,13 @@ def format_value(value):
     - str: Wrapped in "quotes".
 
     """
-    global _floats, _ints
-    if isinstance(value, _floats):
-        return format_float(float(value))
-    elif isinstance(value, _ints):
-        return str(int(value))
+    # Check Integral before Real: ints are also numbers.Real in the
+    # numeric tower, so the reverse order would format integers as floats
+    if isinstance(value, numbers.Integral):
+        return format_int(int(value))
+    elif isinstance(value, numbers.Real):
+        return format_float(float(value), precision=precision)
     elif isinstance(value, string_types):
+        # FIXME: Is this ever used?
+        assert '"' not in value
         return '"' + value + '"'
     elif hasattr(value, "ce_format"):
         return value.ce_format()
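To make the new formatting behaviour concrete, a few example values
(a sketch assuming the import path from this diff)::

  from ffc.uflacs.language.format_value import format_float, format_value

  format_float(123456.0, precision=3)  # -> '1.23e5' ('1.23e+05' cleaned by _subs)
  format_float(1e-07, precision=3)     # -> '1e-7'
  format_float(0.1)                    # -> '0.1' (no precision: repr keeps full precision)
  format_value(2)                      # -> '2' (integral branch)
  format_value(0.5, precision=4)       # -> '0.5'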
diff --git a/ffc/uflacs/language/ufl_to_cnodes.py b/ffc/uflacs/language/ufl_to_cnodes.py
index fbb2db3..3a1b926 100644
--- a/ffc/uflacs/language/ufl_to_cnodes.py
+++ b/ffc/uflacs/language/ufl_to_cnodes.py
@@ -29,12 +29,31 @@ class UFL2CNodesMixin(object):
     def __init__(self, language):
         self.L = language
 
+        self.force_floats = False
+        self.enable_strength_reduction = False
+
     # === Error handlers for missing formatting rules ===
 
     def expr(self, o):
         "Generic fallback with error message for missing rules."
         error("Missing C++ formatting rule for expr type {0}.".format(o._ufl_class_))
 
+    # === Formatting rules for scalar literals ===
+
+    def zero(self, o):
+        return self.L.LiteralFloat(0.0)
+
+    #def complex_value(self, o):
+    #    return self.L.ComplexValue(complex(o))
+
+    def float_value(self, o):
+        return self.L.LiteralFloat(float(o))
+
+    def int_value(self, o):
+        if self.force_floats:
+            return self.float_value(o)
+        return self.L.LiteralInt(int(o))
+
     # === Formatting rules for arithmetic operators ===
 
     def sum(self, o, a, b):
@@ -47,7 +66,10 @@ class UFL2CNodesMixin(object):
         return self.L.Mul(a, b)
 
     def division(self, o, a, b):
-        return self.L.Div(a, b)
+        if self.enable_strength_reduction:
+            return self.L.Mul(a, self.L.Div(1.0, b))
+        else:
+            return self.L.Div(a, b)
 
     # === Formatting rules for conditional expressions ===
 
diff --git a/ffc/uflacs/representation/__init__.py b/ffc/uflacs/representation/__init__.py
deleted file mode 100644
index dc64a80..0000000
--- a/ffc/uflacs/representation/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2016 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""Algorithms for the representation phase of form compilation."""
diff --git a/ffc/uflacs/representation/build_uflacs_ir.py b/ffc/uflacs/representation/build_uflacs_ir.py
deleted file mode 100644
index 37a7f4d..0000000
--- a/ffc/uflacs/representation/build_uflacs_ir.py
+++ /dev/null
@@ -1,437 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2011-2016 Martin Sandve Alnæs
-#
-# This file is part of UFLACS.
-#
-# UFLACS is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# UFLACS is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
-
-"""Main algorithm for building the uflacs intermediate representation."""
-
-import numpy
-
-from ufl import product, as_ufl
-from ufl.log import error
-from ufl.checks import is_cellwise_constant
-from ufl.classes import CellCoordinate, FacetCoordinate, QuadratureWeight
-
-from ffc.uflacs.analysis.balancing import balance_modifiers
-from ffc.uflacs.analysis.modified_terminals import is_modified_terminal, analyse_modified_terminal
-from ffc.uflacs.analysis.graph import build_graph
-from ffc.uflacs.analysis.graph_vertices import build_scalar_graph_vertices
-from ffc.uflacs.analysis.graph_rebuild import rebuild_with_scalar_subexpressions
-from ffc.uflacs.analysis.graph_dependencies import compute_dependencies, mark_active, mark_image
-from ffc.uflacs.analysis.graph_ssa import compute_dependency_count, invert_dependencies
-#from ffc.uflacs.analysis.graph_ssa import default_cache_score_policy, compute_cache_scores, allocate_registers
-from ffc.uflacs.analysis.factorization import compute_argument_factorization
-from ffc.uflacs.elementtables.terminaltables import build_optimized_tables
-
-
-def build_uflacs_ir(cell, integral_type, entitytype,
-                    integrands, tensor_shape,
-                    coefficient_numbering,
-                    quadrature_rules, parameters):
-    ir = {}
-
-    # { ufl coefficient: count }
-    ir["coefficient_numbering"] = coefficient_numbering
-
-    rank = len(tensor_shape)
-    
-    # { num_points: expr_ir for one integrand }
-    ir["expr_irs"] = {}
-
-    # Build the core uflacs expression ir for each num_points/integrand
-    # TODO: Better to compute joint IR for all integrands
-    #       and deal with num_points later?
-    #       I.e. common_expr_ir = compute_common_expr_ir(integrands)
-    #       If we want to adjoint quadrature rules for subterms
-    #       automatically anyway, num_points should be advisory.
-    #       For now, expecting multiple num_points to be rare.
-    for num_points in sorted(integrands.keys()):
-        expressions = [integrands[num_points]]
-
-        # TODO: Apply this transformation to integrands earlier?
-        expressions = [balance_modifiers(expr) for expr in expressions]
-
-        # Build scalar list-based graph representation
-        V, V_deps, V_targets = build_scalar_graph(expressions)
-
-
-        # Build terminal_data from V here before factorization.
-        # Then we can use it to derive table properties for all modified terminals,
-        # and then use that to rebuild the scalar graph more efficiently before
-        # argument factorization. We can build terminal_data again after factorization
-        # if that's necessary.
-        initial_terminal_indices = [i for i, v in enumerate(V)
-                                    if is_modified_terminal(v)]
-        initial_terminal_data = [analyse_modified_terminal(V[i])
-                                 for i in initial_terminal_indices]
-        unique_tables, mt_table_ranges, table_types = \
-            build_optimized_tables(num_points, quadrature_rules,
-                cell, integral_type, entitytype, initial_terminal_data, parameters)
-
-        # Build replacement map for modified terminals with zero tables
-        z = as_ufl(0.0)
-        for i, mt in zip(initial_terminal_indices, initial_terminal_data):
-            tr = mt_table_ranges.get(mt)
-            if tr is not None:
-                uname, begin, end = tr
-                ttype = table_types[uname]
-                # Any modified terminal with zero table is itself a zero value
-                if ttype == "zeros":
-                    V[i] = z
-        # Propagate expression changes
-        # (could possibly use replace() on target expressions instead)
-        for i in range(len(V)):
-            deps = [V[j] for j in V_deps[i]]
-            if deps:
-                V[i] = V[i]._ufl_expr_reconstruct_(*deps)
-
-        # Rebuild scalar target expressions and graph
-        # (this may be overkill and possible to optimize
-        # away if it turns out to be costly)
-        expressions = [V[i] for i in V_targets]
-
-        # Rebuild scalar list-based graph representation
-        SV, SV_deps, SV_targets = build_scalar_graph(expressions)
-        assert all(i < len(SV) for i in SV_targets)
-
-
-        # Compute factorization of arguments
-        (argument_factorizations, modified_arguments,
-             FV, FV_deps, FV_targets) = \
-            compute_argument_factorization(SV, SV_deps, SV_targets, rank)
-        assert len(SV_targets) == len(argument_factorizations)
-
-        # TODO: Still expecting one target variable in code generation
-        assert len(argument_factorizations) == 1
-        argument_factorization, = argument_factorizations
-
-        # Store modified arguments in analysed form
-        for i in range(len(modified_arguments)):
-            modified_arguments[i] = analyse_modified_terminal(modified_arguments[i])
-
-        # Build set of modified_terminal indices into factorized_vertices
-        modified_terminal_indices = [i for i, v in enumerate(FV)
-                                     if is_modified_terminal(v)]
-
-        # Build set of modified terminal ufl expressions
-        modified_terminals = [analyse_modified_terminal(FV[i])
-                              for i in modified_terminal_indices]
-
-        # Organize table data more, split into arguments and other terminals
-        modified_terminal_table_ranges = [mt_table_ranges.get(mt)
-                                          for mt in modified_terminals]
-        modified_argument_table_ranges = [mt_table_ranges.get(mt)
-                                          for mt in modified_arguments]
-
-
-        # Dependency analysis
-        inv_FV_deps, FV_active, FV_piecewise, FV_varying = \
-            analyse_dependencies(FV, FV_deps, FV_targets,
-                                 modified_terminal_indices,
-                                 mt_table_ranges,
-                                 table_types)
-
-        # Mark active modified arguments
-        #active_modified_arguments = numpy.zeros(len(modified_arguments), dtype=int)
-        #for ma_indices in argument_factorization:
-        #    for j in ma_indices:
-        #        active_modified_arguments[j] = 1
-
-
-        # Figure out which table names are active
-        active_table_names = set()
-        for i, tr in zip(modified_terminal_indices, modified_terminal_table_ranges):
-            if FV_active[i] and tr is not None:
-                active_table_names.add(tr[0])
-        for ma_indices in argument_factorization:
-            for j in ma_indices:
-                tr = modified_argument_table_ranges[j]
-                if tr is not None:
-                    active_table_names.add(tr[0])
-
-        # Drop tables not referenced from modified terminals
-        # and and tables of zeros and ones
-        unused_types = ("zeros", "ones", "quadrature")
-        used_table_names = set(name for name in active_table_names
-                               if name is not None
-                                  and table_types[name] not in unused_types)
-        unique_tables = { name: unique_tables[name] for name in used_table_names }
-
-
-        # Analyse active terminals to check what we'll need to generate code for
-        active_mts = [mt for i, mt in zip(modified_terminal_indices, modified_terminals)
-                      if FV_active[i]]
-
-        # Figure out if we need to access CellCoordinate to
-        # avoid generating quadrature point table otherwise
-        if integral_type == "cell":
-            need_points = any(isinstance(mt.terminal, CellCoordinate)
-                              for mt in active_mts)
-        elif integral_type in ("interior_facet", "exterior_facet"):
-            need_points = any(isinstance(mt.terminal, FacetCoordinate)
-                              for mt in active_mts)
-        else:
-            need_points = False
-
-        # Figure out if we need to access QuadratureWeight to
-        # avoid generating quadrature point table otherwise
-        need_weights = any(isinstance(mt.terminal, QuadratureWeight)
-                           for mt in active_mts)
-
-        # Loop over factorization terms
-        from collections import defaultdict
-        block_contributions = {
-            # TODO: Should not store piecewise blocks inside num_points context
-            "piecewise": defaultdict(list),
-            "varying": defaultdict(list)
-            }
-        for ma_indices, fi in sorted(argument_factorization.items()):
-            # Get a bunch of information about this term
-            rank = len(ma_indices)
-            trs = tuple(modified_argument_table_ranges[ai] for ai in ma_indices)
-            unames = tuple(tr[0] for tr in trs)
-            dofblock = tuple(tr[1:3] for tr in trs)
-            ttypes = tuple(table_types[name] for name in unames)
-            assert not any(tt == "zeros" for tt in ttypes)
-
-            piecewise_types = ("piecewise", "fixed", "ones")
-            if FV_piecewise[fi] and all(tt in piecewise_types for tt in ttypes):
-                contributions = block_contributions["piecewise"][dofblock]
-            else:
-                contributions = block_contributions["varying"][dofblock]
-
-            data = (ma_indices, fi, trs, unames, ttypes)
-            contributions.append(data)
-
-
-        # Build IR dict for the given expressions
-        expr_ir = {}
-
-        expr_ir["block_contributions"] = block_contributions
-        
-        # (array) FV-index -> UFL subexpression
-        expr_ir["V"] = FV
-
-        # (array) Flattened input expression component index -> FV-index
-        expr_ir["target_variables"] = FV_targets
-
-        ### Result of factorization:
-        # (array) MA-index -> UFL expression of modified arguments
-        expr_ir["modified_arguments"] = modified_arguments
-
-        # (dict) tuple(MA-indices) -> FV-index of monomial factor
-        expr_ir["argument_factorization"] = argument_factorization
-
-        ### Modified terminals
-        # (array) list of FV-indices to modified terminals
-        expr_ir["modified_terminal_indices"] = modified_terminal_indices
-
-        # Dependency structure of graph:
-        # (CRSArray) FV-index -> direct dependency FV-index list
-        #expr_ir["dependencies"] = FV_deps
-
-        # (CRSArray) FV-index -> direct dependee FV-index list
-        #expr_ir["inverse_dependencies"] = inv_FV_deps
-
-        # Metadata about each vertex
-        expr_ir["active"] = FV_active        # (array) FV-index -> bool
-        expr_ir["piecewise"] = FV_piecewise  # (array) FV-index -> bool
-        expr_ir["varying"] = FV_varying      # (array) FV-index -> bool
-
-        expr_ir["modified_terminal_table_ranges"] = modified_terminal_table_ranges
-        expr_ir["modified_argument_table_ranges"] = modified_argument_table_ranges
-
-        # Store table data in FV indexing, this is used in integralgenerator
-        expr_ir["table_ranges"] = numpy.empty(len(FV), dtype=object)
-        expr_ir["table_ranges"][expr_ir["modified_terminal_indices"]] = \
-            expr_ir["modified_terminal_table_ranges"]
-
-        expr_ir["need_points"] = need_points
-        expr_ir["need_weights"] = need_weights
-
-        # Store the tables and ranges
-        expr_ir["table_types"] = table_types
-        expr_ir["unique_tables"] = unique_tables
-
-
-        # TODO: Some tables are associated with num_points, some are not
-        #       (i.e. piecewise constant, averaged and x0).
-        #       It will be easier to deal with that if we can join
-        #       the expr_ir for all num_points as mentioned above.
-        ir["expr_irs"][num_points] = expr_ir
-
-    return ir
-
-
-def build_scalar_graph(expressions):
-    """Build list representation of expression graph covering the given expressions.
-
-    TODO: Renaming, refactoring and cleanup of the graph building algorithms used in here
-    """
-
-    # Build the initial coarse computational graph of the expression
-    G = build_graph(expressions)
-
-    assert len(expressions) == 1, "FIXME: Multiple expressions in graph building needs more work from this point on."
-
-    # Build more fine grained computational graph of scalar subexpressions
-    # TODO: Make it so that
-    #   expressions[k] <-> NV[nvs[k][:]],
-    #   len(nvs[k]) == value_size(expressions[k])
-    scalar_expressions = rebuild_with_scalar_subexpressions(G)
-
-    # Sanity check on number of scalar symbols/components
-    assert len(scalar_expressions) == sum(product(expr.ufl_shape) for expr in expressions)
-
-    # Build new list representation of graph where all
-    # vertices of V represent single scalar operations
-    e2i, V, V_targets = build_scalar_graph_vertices(scalar_expressions)
-
-    # Compute sparse dependency matrix
-    V_deps = compute_dependencies(e2i, V)
-
-    return V, V_deps, V_targets
-
-
-def analyse_dependencies(V, V_deps, V_targets,
-                         modified_terminal_indices,
-                         mt_table_ranges,
-                         table_types):
-    # Count the number of dependencies every subexpr has
-    V_depcount = compute_dependency_count(V_deps)
-
-    # Build the 'inverse' of the sparse dependency matrix
-    inv_deps = invert_dependencies(V_deps, V_depcount)
-
-    # Mark subexpressions of V that are actually needed for final result
-    active, num_active = mark_active(V_deps, V_targets)
-
-    # Build piecewise/varying markers for factorized_vertices
-    varying_indices = []
-    for i in modified_terminal_indices:
-
-        # TODO: Can probably avoid this re-analysis by
-        # passing other datastructures in here:
-        mt = analyse_modified_terminal(V[i])
-        tr = mt_table_ranges.get(mt)
-        if tr is not None:
-            # Check if table computations have revealed values varying over points
-            uname = tr[0]
-            ttype = table_types[uname]
-            # Note: uniform means entity-wise uniform, varying over points
-            if ttype in ("varying", "uniform", "quadrature"):
-                varying_indices.append(i)
-            else:
-                if ttype not in ("fixed", "piecewise", "ones", "zeros"):
-                    error("Invalid ttype %s" % (ttype,))
-
-        elif not is_cellwise_constant(V[i]):
-            # Keeping this check to be on the safe side,
-            # not sure which cases this will cover (if any)
-            varying_indices.append(i)
-
-    # Mark every subexpression that is computed
-    # from the spatially dependent terminals
-    varying, num_varying = mark_image(inv_deps, varying_indices)
-
-    # The rest of the subexpressions are piecewise constant (1-1=0, 1-0=1)
-    piecewise = 1 - varying
-
-    # Unmark non-active subexpressions
-    varying *= active
-    piecewise *= active
-
-    # TODO: Skip literals in both varying and piecewise
-    # nonliteral = ...
-    # varying *= nonliteral
-    # piecewise *= nonliteral
-
-    return inv_deps, active, piecewise, varying
-
-
-# TODO: Consider comments below and do it or delete them.
-
-""" Old comments:
-
-Work for later::
-
-        - Apply some suitable renumbering of vertices and corresponding arrays prior to returning
-
-        - Allocate separate registers for each partition
-          (but e.g. argument[iq][i0] may need to be accessible in other loops)
-
-        - Improve register allocation algorithm
-
-        - Take a list of expressions as input to compile several expressions in one joined graph
-          (e.g. to compile a,L,M together for nonlinear problems)
-
-"""
-
-
-""" # Old comments:
-
-    # TODO: Inspection of varying shows that factorization is
-    # needed for effective loop invariant code motion w.r.t. quadrature loop as well.
-    # Postphoning that until everything is working fine again.
-    # Core ingredients for such factorization would be:
-    # - Flatten products of products somehow
-    # - Sorting flattened product factors by loop dependency then by canonical ordering
-    # Or to keep binary products:
-    # - Rebalancing product trees ((a*c)*(b*d) -> (a*b)*(c*d)) to make piecewise quantities 'float' to the top of the list
-
-    # rank = max(len(ma_indices) for ma_indices in argument_factorization)
-    # for i,a in enumerate(modified_arguments):
-    #    iarg = a.number()
-    # ipart = a.part()
-
-    # TODO: More structured MA organization?
-    #modified_arguments[rank][block][entry] -> UFL expression of modified argument
-    #dofranges[rank][block] -> (begin, end)
-    # or
-    #modified_arguments[rank][entry] -> UFL expression of modified argument
-    #dofrange[rank][entry] -> (begin, end)
-    #argument_factorization: (dict) tuple(MA-indices (only relevant ones!)) -> V-index of monomial factor
-    # becomes
-    #argument_factorization: (dict) tuple(entry for each(!) rank) -> V-index of monomial factor ## doesn't cover intermediate f*u in f*u*v!
-"""
-
-
-"""
-def old_code_useful_for_optimization():
-
-    # Use heuristics to mark the usefulness of storing every subexpr in a variable
-    scores = compute_cache_scores(V,
-                                  active,
-                                  dependencies,
-                                  inverse_dependencies,
-                                  partitions,  # TODO: Rewrite in terms of something else, this doesn't exist anymore
-                                  cache_score_policy=default_cache_score_policy)
-
-    # Allocate variables to store subexpressions in
-    allocations = allocate_registers(active, partitions, target_variables,
-                                     scores, int(parameters["max_registers"]), int(parameters["score_threshold"]))
-    target_registers = [allocations[r] for r in target_variables]
-    num_registers = sum(1 if x >= 0 else 0 for x in allocations)
-    # TODO: If we renumber we can allocate registers separately for each partition, which is probably a good idea.
-
-    expr_oir = {}
-    expr_oir["num_registers"] = num_registers
-    expr_oir["partitions"] = partitions
-    expr_oir["allocations"] = allocations
-    expr_oir["target_registers"] = target_registers
-    return expr_oir
-"""
-
diff --git a/ffc/uflacs/tools.py b/ffc/uflacs/tools.py
index e8f900b..479ef35 100644
--- a/ffc/uflacs/tools.py
+++ b/ffc/uflacs/tools.py
@@ -39,14 +39,8 @@ def collect_quadrature_rules(integrals, default_scheme, default_degree):
     return rules
 
 
-def compute_quadrature_rules(itg_data):
+def compute_quadrature_rules(rules, integral_type, cell):
     "Compute points and weights for a set of quadrature rules."
-    # Collect which quadrature rules occur in integrals
-    default_scheme = itg_data.metadata["quadrature_degree"]
-    default_degree = itg_data.metadata["quadrature_rule"]
-    rules = collect_quadrature_rules(
-        itg_data.integrals, default_scheme, default_degree)
-
     quadrature_rules = {}
     quadrature_rule_sizes = {}
     for rule in rules:
@@ -54,7 +48,7 @@ def compute_quadrature_rules(itg_data):
 
         # Compute quadrature points and weights
         (points, weights) = create_quadrature_points_and_weights(
-            itg_data.integral_type, itg_data.domain.ufl_cell(), degree, scheme)
+            integral_type, cell, degree, scheme)
 
         if points is not None:
             points = numpy.asarray(points)
@@ -83,18 +77,23 @@ def accumulate_integrals(itg_data, quadrature_rule_sizes):
     if not itg_data.integrals:
         return {}
 
-    default_scheme = itg_data.metadata["quadrature_degree"]
-    default_degree = itg_data.metadata["quadrature_rule"]
-
     # Group integrands by quadrature rule
     sorted_integrands = collections.defaultdict(list)
-    for integral in itg_data.integrals:
-        md = integral.metadata() or {}
-        scheme = md.get("quadrature_rule", default_scheme)
-        degree = md.get("quadrature_degree", default_degree)
-        rule = (scheme, degree)
-        num_points = quadrature_rule_sizes[rule]
-        sorted_integrands[num_points].append(integral.integrand())
+    if itg_data.integral_type in custom_integral_types:
+        # Should only be one size here, ignoring irrelevant metadata and parameters
+        num_points, = quadrature_rule_sizes.values()
+        for integral in itg_data.integrals:
+            sorted_integrands[num_points].append(integral.integrand())
+    else:
+        default_scheme = itg_data.metadata["quadrature_rule"]
+        default_degree = itg_data.metadata["quadrature_degree"]
+        for integral in itg_data.integrals:
+            md = integral.metadata() or {}
+            scheme = md.get("quadrature_rule", default_scheme)
+            degree = md.get("quadrature_degree", default_degree)
+            rule = (scheme, degree)
+            num_points = quadrature_rule_sizes[rule]
+            sorted_integrands[num_points].append(integral.integrand())
 
     # Accumulate integrands in a canonical ordering defined by UFL
     sorted_integrals = {
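For reference, a small sketch (hypothetical metadata values) of how
per-integral metadata overrides the defaults when grouping integrands by
quadrature rule::

  default_scheme, default_degree = "default", 2

  md = {"quadrature_degree": 4}                         # integral-level override
  scheme = md.get("quadrature_rule", default_scheme)    # -> "default"
  degree = md.get("quadrature_degree", default_degree)  # -> 4
  rule = (scheme, degree)  # key into quadrature_rule_sizes -> num_points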
diff --git a/ffc/uflacs/uflacsgenerator.py b/ffc/uflacs/uflacsgenerator.py
index 3813775..fb0ae1c 100644
--- a/ffc/uflacs/uflacsgenerator.py
+++ b/ffc/uflacs/uflacsgenerator.py
@@ -23,7 +23,7 @@ from ffc.log import info
 from ffc.representationutils import initialize_integral_code
 
 from ffc.uflacs.backends.ffc.backend import FFCBackend
-from ffc.uflacs.generation.integralgenerator import IntegralGenerator
+from ffc.uflacs.integralgenerator import IntegralGenerator
 from ffc.uflacs.language.format_lines import format_indented_lines
 
 
@@ -32,47 +32,30 @@ def generate_integral_code(ir, prefix, parameters):
 
     info("Generating code from ffc.uflacs representation")
 
-    # Generate generic ffc code snippets
-    code = initialize_integral_code(ir, prefix, parameters)
-
-    # Generate tabulate_tensor body using uflacs algorithms
-    uflacs_code = generate_tabulate_tensor_code(ir, prefix, parameters)
-
-    code["tabulate_tensor"] = uflacs_code["tabulate_tensor"]
-
-    # TODO: Use code generation utils here for consistency
-    if ir.get("num_cells") is not None:
-        code["num_cells"] = "  return %d;" % (ir["num_cells"],)
-
-    code["additional_includes_set"] = set()
-    code["additional_includes_set"].update(ir.get("additional_includes_set",()))
-    code["additional_includes_set"].update(uflacs_code["additional_includes_set"])
-
-    return code
-
-
-def generate_tabulate_tensor_code(ir, prefix, parameters):
+    # FIXME: Is this the right precision value to use? Make it default to None or 0.
+    precision = ir["integrals_metadata"]["precision"]
 
     # Create FFC C++ backend
     backend = FFCBackend(ir, parameters)
 
-    # Create code generator for integral body
-    ig = IntegralGenerator(ir, backend)
+    # Configure kernel generator
+    ig = IntegralGenerator(ir, backend, precision)
 
     # Generate code ast for the tabulate_tensor body
     parts = ig.generate()
 
-    # Format code AST as one string
-    body = format_indented_lines(parts.cs_format(), 1)
+    # Format code as string
+    body = format_indented_lines(parts.cs_format(precision), 1)
 
-    # Fetch includes
-    includes = set(ig.get_includes())
+    # Generate generic ffc code snippets and add uflacs specific parts
+    code = initialize_integral_code(ir, prefix, parameters)
+    code["tabulate_tensor"] = body
+    code["additional_includes_set"] = set(ig.get_includes())
+    code["additional_includes_set"].update(ir.get("additional_includes_set", ()))
 
-    # Format uflacs specific code structures into a single
-    # string and place in dict before returning to ffc
-    code = {
-        "tabulate_tensor": body,
-        "additional_includes_set": includes,
-    }
+    # TODO: Move to initialize_integral_code, this is not representation specific
+    if ir.get("num_cells") is not None:
+        ret = backend.language.Return(ir["num_cells"])
+        code["num_cells"] = format_indented_lines(ret.cs_format(), 1)
 
     return code
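The nested tuples returned by cs_format become source text via
format_indented_lines. A simplified standalone sketch of that flattening
(the real helper in ffc/uflacs/language/format_lines.py handles more
cases)::

  class Indented(object):
      def __init__(self, body):
          self.body = body

  def format_lines(parts, level=0):
      # Tuples/lists are sequences at the same level, Indented bumps
      # the level by one, and strings are single lines of code.
      if isinstance(parts, Indented):
          return format_lines(parts.body, level + 1)
      if isinstance(parts, (tuple, list)):
          return "".join(format_lines(p, level) for p in parts)
      return "    " * level + parts + "\n"

  code = ("for (int i = 0; i < n; ++i)", Indented("A[i] = 0.0;"))
  print(format_lines(code, level=1))
  #     for (int i = 0; i < n; ++i)
  #         A[i] = 0.0;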
diff --git a/ffc/uflacs/uflacsrepresentation.py b/ffc/uflacs/uflacsrepresentation.py
index 23c1252..9138f70 100644
--- a/ffc/uflacs/uflacsrepresentation.py
+++ b/ffc/uflacs/uflacsrepresentation.py
@@ -20,12 +20,13 @@ import numpy
 
 from ufl.algorithms import replace
 from ufl.utils.sorting import sorted_by_count
+from ufl import custom_integral_types
 
 from ffc.log import info
 from ffc.representationutils import initialize_integral_ir
 from ffc.fiatinterface import create_element
-from ffc.uflacs.tools import compute_quadrature_rules, accumulate_integrals
-from ffc.uflacs.representation.build_uflacs_ir import build_uflacs_ir
+from ffc.uflacs.tools import collect_quadrature_rules, compute_quadrature_rules, accumulate_integrals
+from ffc.uflacs.build_uflacs_ir import build_uflacs_ir
 
 
 def compute_integral_ir(itg_data,
@@ -44,11 +45,6 @@ def compute_integral_ir(itg_data,
     # Store element classnames
     ir["classnames"] = classnames
 
-    # TODO: Set alignas and padlen from parameters
-    sizeof_double = 8
-    ir["alignas"] = 32
-    ir["padlen"] = ir["alignas"] // sizeof_double
-
     # Get element space dimensions
     unique_elements = element_numbers.keys()
     ir["element_dimensions"] = { ufl_element: create_element(ufl_element).space_dimension()
@@ -65,12 +61,33 @@ def compute_integral_ir(itg_data,
     else:
         ir["tensor_shape"] = argument_dimensions
 
+    integral_type = itg_data.integral_type
+    cell = itg_data.domain.ufl_cell()
+
+    if integral_type in custom_integral_types:
+        # Set quadrature degree to twice the highest element degree, to get
+        # enough points to identify basis functions via table computations
+        max_element_degree = max([1] + [ufl_element.degree() for ufl_element in unique_elements])
+        rules = [("default", 2*max_element_degree)]
+        quadrature_integral_type = "cell"
+    else:
+        # Collect which quadrature rules occur in integrals
+        default_scheme = itg_data.metadata["quadrature_rule"]
+        default_degree = itg_data.metadata["quadrature_degree"]
+        rules = collect_quadrature_rules(
+            itg_data.integrals, default_scheme, default_degree)
+        quadrature_integral_type = integral_type
+
     # Compute actual points and weights
-    quadrature_rules, quadrature_rule_sizes = compute_quadrature_rules(itg_data)
+    quadrature_rules, quadrature_rule_sizes = compute_quadrature_rules(
+        rules, quadrature_integral_type, cell)
 
     # Store quadrature rules in format { num_points: (points, weights) }
     ir["quadrature_rules"] = quadrature_rules
 
+    # Store the fake num_points for analysis in custom integrals
+    if integral_type in custom_integral_types:
+        ir["fake_num_points"], = quadrature_rules.keys()
 
     # Group and accumulate integrals on the format { num_points: integral data }
     sorted_integrals = accumulate_integrals(itg_data, quadrature_rule_sizes)
@@ -122,11 +139,4 @@ def compute_integral_ir(itg_data,
                                 parameters)
     ir.update(uflacs_ir)
 
-    # Consistency check on quadrature rules
-    rules1 = sorted(ir["expr_irs"].keys())
-    rules2 = sorted(ir["quadrature_rules"].keys())
-    if rules1 != rules2:
-        warning("Found different rules in expr_irs and "
-                "quadrature_rules:\n{0}\n{1}".format(rules1, rules2))
-
     return ir
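A quick sanity check of the custom-integral rule above (hypothetical
element degrees)::

  element_degrees = [1, 2, 3]
  max_element_degree = max([1] + element_degrees)  # -> 3
  # A product of two degree-3 basis functions has polynomial degree 6,
  # so a degree-6 rule can resolve basis function products exactly.
  rules = [("default", 2 * max_element_degree)]    # -> [("default", 6)]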
diff --git a/setup.py b/setup.py
index 18dd289..f7d34ef 100755
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,6 @@ from __future__ import print_function
 
 import os
 import sys
-import re
 import subprocess
 import string
 
@@ -13,8 +12,7 @@ if sys.version_info < (2, 7):
     print("Python 2.7 or higher required, please upgrade.")
     sys.exit(1)
 
-VERSION = re.findall('__version__ = "(.*)"',
-                     open('ffc/__init__.py', 'r').read())[0]
+VERSION = "2017.1.0"
 
 URL = "https://bitbucket.org/fenics-project/ffc/"
 
@@ -152,5 +150,6 @@ def run_install():
                             "dijitso==%s" % VERSION],
           zip_safe=False)
 
+
 if __name__ == "__main__":
     run_install()
diff --git a/test/regression/README.rst b/test/regression/README.rst
index d9b2811..ff96202 100644
--- a/test/regression/README.rst
+++ b/test/regression/README.rst
@@ -1,12 +1,12 @@
 How to run regression tests
 ===========================
 
-To run regression tests with default parameters, simply run:
+To run regression tests with default parameters, simply run::
 
   cd <ffcdir>/tests/regression/
   python test.py
 
-Look at test.py for more options.
+Look at ``test.py`` for more options.
 
 
 How to update references
@@ -14,29 +14,35 @@ How to update references
 
 To update the references for the FFC regression tests, first commit
 your changes, then run the regression test (to generate the new
-references) and finally run the script upload:
+references) and finally run the script upload::
 
   <commit your changes>
   cd <ffcdir>/tests/regression/
-  python test.py
+  python test.py [--use-tsfc]
   ./scripts/upload
 
+Note: Contributors are encouraged to also install the TSFC stack and
+update the references for the ``tsfc`` representation using the
+``--use-tsfc`` flag. For installation instructions see
+``doc/sphinx/source/installation.rst``. Note that the ``tsfc``
+regression tests are run in separate plans on the Bamboo CI system.
+
 Note: You may be asked for your *Bitbucket* username and password when
 uploading the reference data, if use of ssh keys fails.
 
 Note: The upload script will push the new references to the
-ffc-reference-data repository. This is harmless even if these
+``ffc-reference-data`` repository. This is harmless even if these
 references are not needed later.
 
-Note: The upload script will update the file ffc-regression-data-id
+Note: The upload script will update the file ``ffc-regression-data-id``
 and commit this change to the currently active branch, remember to
 include this commit when merging or pushing your changes elsewhere.
 
 Note: You can cherry-pick the commit that updated
-ffc-regression-data-id into another branch to use the same set of
+``ffc-regression-data-id`` into another branch to use the same set of
 references there.
 
-Note: If you ever get merge conflicts in the ffc-regression-data-id,
+Note: If you ever get merge conflicts in the ``ffc-regression-data-id``,
 always pick one version of the file. Most likely you'll need to update
 the references again.
 
@@ -46,13 +52,47 @@ How to run regression tests against a different set of regression data
 
 To run regression tests and compare to a different set of regression
 data, perhaps to see what has changed in generated code since a
-certain version, check out the ffc-regression-data-id file you want
-and run tests as usual
+certain version, check out the ``ffc-regression-data-id`` file you want
+and run tests as usual::
 
   cd <ffcdir>/tests/regression/
   git checkout <ffc-commit-id> ffc-regression-data-id
   python test.py
 
 The test.py script will run scripts/download which will check out the
-regression data with the commit id from ffc-regression-data-id in
-ffc-regression-data/.
+regression data with the commit id from ``ffc-regression-data-id`` in
+``ffc-regression-data/``.
+
+
+How to inspect diff in output from executed generated code
+==========================================================
+
+Say you have differences in the output of PoissonDG. You can diff
+the ``.json`` files (NB: the comparison allows some tolerance in the
+floating point accuracy) like this::
+
+  python recdiff.py ffc-reference-data/r_uflacs/PoissonDG.json output/r_uflacs/PoissonDG.json
+  python recdiff.py output/r_uflacs/PoissonDG.json output/r_tensor/PoissonDG.json
+
+Pick any combination of ``ffc-reference-data | output`` and
+``r_foo | r_bar`` you want to compare.
+
+
+How to manually delete old reference data
+=========================================
+
+If you update the tests such that some data should no longer be kept
+in the data repository, this approach allows deleting reference data::
+
+  cd ffc-regression-data
+  git checkout master
+  git pull
+  git rm -rf <old-directory-or-files>
+  git commit -a -m"Manually updated data because ..."
+  git rev-parse HEAD > ../ffc-regression-data-id
+  cd ..
+  git commit ffc-regression-data-id -m"Manually updated reference id."
+
+This is not automated because it happens rarely. It is probably a
+good idea to coordinate with other developers so they don't
+reintroduce the deleted files from another branch.
diff --git a/test/regression/ffc-reference-data-id b/test/regression/ffc-reference-data-id
index 891310b..6b27cfd 100644
--- a/test/regression/ffc-reference-data-id
+++ b/test/regression/ffc-reference-data-id
@@ -1 +1 @@
-830e8f7a0bf7daee4acd819ff854d8d608a69194
+3bf77df251501ac5aafcad2febe4efbef076218b
diff --git a/test/regression/recdiff.py b/test/regression/recdiff.py
index 545c4b2..8d6464b 100644
--- a/test/regression/recdiff.py
+++ b/test/regression/recdiff.py
@@ -12,10 +12,11 @@ class DiffMarkerType:
     def __repr__(self):
         return self.name
 
+
 DiffMissing = DiffMarkerType("<value missing>")
 DiffEqual = DiffMarkerType("<equal>")
 
-_default_recdiff_tolerance = 1e-6
+_default_recdiff_tolerance = 1e-5
 
 
 def recdiff_dict(data1, data2, tolerance=_default_recdiff_tolerance):
diff --git a/test/regression/scripts/upload b/test/regression/scripts/upload
index fc317b9..9319384 100755
--- a/test/regression/scripts/upload
+++ b/test/regression/scripts/upload
@@ -42,7 +42,7 @@ fi
 
 # Copy references
 echo "Copying new reference data to $DATA_DIR"
-rsync -r  --exclude='README.rst' --exclude='*.bin' --exclude='*.cpp' $OUTPUT_DIR/ $DATA_DIR
+rsync -r  --exclude='README.rst' --exclude='*.build' --exclude='*.bin' --exclude='*.cpp' --exclude='*~' --exclude='.*' --exclude='*#*' $OUTPUT_DIR/ $DATA_DIR
 echo ""
 
 # Get current id for main repo (does not include dirty files, so not quite trustworthy!)
diff --git a/test/regression/test.py b/test/regression/test.py
index 68d3182..47b2bee 100644
--- a/test/regression/test.py
+++ b/test/regression/test.py
@@ -36,6 +36,7 @@ option --bench.
 # FIXME: Need to add many more test cases. Quite a few DOLFIN forms
 # failed after the FFC tests passed.
 
+from collections import OrderedDict
 import os
 import sys
 import shutil
@@ -44,10 +45,11 @@ import sysconfig
 import subprocess
 import time
 import logging
+import traceback
 from numpy import array, shape, abs, max, isnan
 import ffc
 from ffc.log import begin, end, info, info_red, info_green, info_blue
-from ffc.log import ffc_logger, ERROR, WARNING
+from ffc.log import ffc_logger, ERROR
 from ufl.log import ufl_logger
 from ufl.utils.py23 import as_native_str
 from ffc import get_ufc_cxx_flags
@@ -57,7 +59,7 @@ from ufctest import generate_test_code
 # Parameters TODO: Can make this a cmdline argument, and start
 # crashing programs in debugger automatically?
 debug = False
-output_tolerance = 1.e-6
+output_tolerance = 1e-5
 demo_directory = "../../../../demo"
 bench_directory = "../../../../bench"
 
@@ -84,6 +86,7 @@ class LTFilter(object):
     def filter(self, record):
         return record.levelno < self.__level
 
+
 # Filter out error messages from std output
 splitlevel = ERROR
 ffc_logger.get_handler().addFilter(LTFilter(splitlevel))
@@ -105,18 +108,90 @@ ext_quad = [
     "-r quadrature -O -fprecompute_basis_const -feliminate_zeros",
 ]
 
-# Extended uflacs tests (to be extended with optimisation parameters
-# later)
+# Extended uflacs tests
+# (to be extended with optimisation parameters later)
 ext_uflacs = [
-    "-r uflacs",
+    "-r uflacs -O -fvectorize -fpadlen=4 -falignas=32",
+    "-r uflacs -O -fno-enable_sum_factorization",
+    "-r uflacs -O -fno-enable_preintegration",
+    "-r uflacs -O -fenable_premultiplication",
 ]
 
+known_quad_failures = set([
+    "PoissonQuad.ufl",
+])
+
 known_uflacs_failures = set([
     "CustomIntegral.ufl",
     "CustomMixedIntegral.ufl",
     "CustomVectorIntegral.ufl",
+    "MetaData.ufl",
+])
+
+known_tensor_failures = set([
+#    "AdaptivePoisson.ufl",
+    "AlgebraOperators.ufl",
+    "BiharmonicHHJ.ufl",
+    "BiharmonicRegge.ufl",
+    "Biharmonic.ufl",
+    "CellGeometry.ufl",
+    "CoefficientOperators.ufl",
+#    "Components.ufl",
+    "Conditional.ufl",
+#    "Constant.ufl",
+    "CustomIntegral.ufl",
+    "CustomMixedIntegral.ufl",
+    "CustomVectorIntegral.ufl",
+#    "Elasticity.ufl",
+#    "EnergyNorm.ufl",
+#    "Equation.ufl",
+    "FacetIntegrals.ufl",
+#    "FacetRestrictionAD.ufl",
+#    "Heat.ufl",
+    "HyperElasticity.ufl",
+#    "Mass.ufl",
+    "MathFunctions.ufl",
+    "MetaData.ufl",
+#    "Mini.ufl",
+#    "MixedCoefficient.ufl",
+#    "MixedMixedElement.ufl",
+#    "MixedPoissonDual.ufl",
+#    "MixedPoisson.ufl",
+#    "NavierStokes.ufl",
+#    "NeumannProblem.ufl",
+    "Normals.ufl",
+#    "Optimization.ufl",
+#    "P5tet.ufl",
+#    "P5tri.ufl",
+    "PointMeasure.ufl",
+#    "Poisson1D.ufl",
+    "PoissonDG.ufl",
+    "PoissonQuad.ufl",
+#    "Poisson.ufl",
+#    "ProjectionManifold.ufl",
+    "QuadratureElement.ufl",
+#    "ReactionDiffusion.ufl",
+#    "RestrictedElement.ufl",
+    "SpatialCoordinates.ufl",
+#    "StabilisedStokes.ufl",
+#    "Stokes.ufl",
+#    "SubDomains.ufl",
+#    "SubDomain.ufl",
+    "TensorWeightedPoisson.ufl",
+#    "TraceElement.ufl",
+#    "VectorLaplaceGradCurl.ufl",
+#    "VectorPoisson.ufl",
 ])
 
+known_tsfc_failures = set([
+    # Expected not to work
+    "CustomIntegral.ufl",
+    "CustomMixedIntegral.ufl",
+    "CustomVectorIntegral.ufl",
+    "MetaData.ufl",
+])
+
+
 _command_timings = []
 
 
@@ -136,8 +211,9 @@ def run_command(command):
     except subprocess.CalledProcessError as e:
         t2 = time.time()
         _command_timings.append((command, t2 - t1))
-        log_error(e.output)
-        print(e.output)
+        if e.output:
+            log_error(e.output)
+            print(e.output)
         return False
 
 
@@ -207,6 +283,8 @@ def generate_code(args, only_forms, skip_forms):
     # some sort of tag like '#ffc: <flags>'.
     special = {"AdaptivePoisson.ufl": "-e", }
 
+    failures = []
+
     # Iterate over all files
     for f in form_files:
         options = [special.get(f, "")]
@@ -222,9 +300,12 @@ def generate_code(args, only_forms, skip_forms):
         try:
             ok = ffc.main(options)
         except Exception as e:
-            log_error(e)
+            if debug:
+                # Bare raise preserves the original traceback (also on Python 2)
+                raise
+            msg = traceback.format_exc()
+            log_error(cmd)
+            log_error(msg)
             ok = 1
-            raise
         finally:
             t2 = time.time()
             _command_timings.append((cmd, t2 - t1))
@@ -234,8 +315,10 @@ def generate_code(args, only_forms, skip_forms):
             info_green("%s OK" % f)
         else:
             info_red("%s failed" % f)
+            failures.append(f)
 
     end()
+    return failures
 
 
 def validate_code(reference_dir):
@@ -247,6 +330,8 @@ def validate_code(reference_dir):
     begin("Validating generated code (%d header files found)"
           % len(header_files))
 
+    failures = []
+
     # Iterate over all files
     for f in header_files:
 
@@ -274,8 +359,10 @@ def validate_code(reference_dir):
                  % os.path.join(*reference_file.split(os.path.sep)[-3:]))
             log_error("\n" + s + "\n" + len(s) * "-")
             log_error(diff)
+            failures.append(f)
 
     end()
+    return failures
 
 
 def find_boost_cflags():
@@ -335,44 +422,72 @@ def build_programs(bench, permissive):
 
     # Get UFC flags
     ufc_cflags = "-I" + get_ufc_include() + " " + " ".join(get_ufc_cxx_flags())
+
+    # Get boost flags
     boost_cflags, boost_linkflags = find_boost_cflags()
-    ufc_cflags += boost_cflags
-    linker_options = boost_linkflags
+
+    # Get compiler
+    compiler = os.getenv("CXX", "g++")
 
     # Set compiler options
-    compiler_options = "%s -Wall " % ufc_cflags
+    compiler_options = " -Wall"
     if not permissive:
         compiler_options += " -Werror -pedantic"
+
+    # Always need ufc
+    compiler_options += " " + ufc_cflags
+
     if bench:
         info("Benchmarking activated")
-        # Takes too long to build with -O2
-        # compiler_options += " -O2"
-        compiler_options += " -O3"
-        # compiler_options += " -O3 -fno-math-errno -march=native"
+        compiler_options += " -O3 -march=native"
+        # Workaround for gcc bug: gcc is too eager to report array-bounds warning with -O3
+        compiler_options += " -Wno-array-bounds"
+
     if debug:
         info("Debugging activated")
         compiler_options += " -g -O0"
+
     info("Compiler options: %s" % compiler_options)
 
+    failures = []
+
     # Iterate over all files
     for f in header_files:
+        prefix = f.split(".h")[0]
+
+        # Options for all files
+        cpp_flags = compiler_options
+        ld_flags = ""
+
+        # Only add boost flags if necessary
+        needs_boost = prefix == "MathFunctions"
+        if needs_boost:
+            info("Additional compiler options for %s: %s" % (prefix, boost_cflags))
+            info("Additional linker options for %s: %s" % (prefix, boost_linkflags))
+            cpp_flags += " " + boost_cflags
+            ld_flags += " " + boost_linkflags
 
         # Generate test code
         filename = generate_test_code(f)
 
         # Compile test code
-        prefix = f.split(".h")[0]
-        command = "g++ %s -o %s.bin %s.cpp %s" % \
-                  (compiler_options, prefix, prefix, linker_options)
+        command = "%s %s -o %s.bin %s.cpp %s" % \
+                  (compiler, cpp_flags, prefix, prefix, ld_flags)
         ok = run_command(command)
 
+        # Store compile command for easy reproduction
+        with open("%s.build" % (prefix,), "w") as build_file:
+            build_file.write(command + "\n")
+
         # Check status
         if ok:
             info_green("%s OK" % prefix)
         else:
             info_red("%s failed" % prefix)
+            failures.append(prefix)
 
     end()
+    return failures
 
 
 def run_programs(bench):
@@ -386,6 +501,8 @@ def run_programs(bench):
 
     begin("Running generated programs (%d programs found)" % len(test_programs))
 
+    failures = []
+
     # Iterate over all files
     for f in test_programs:
 
@@ -398,8 +515,9 @@ def run_programs(bench):
             info_green("%s OK" % f)
         else:
             info_red("%s failed" % f)
-
+            failures.append(f)
     end()
+    return failures
 
 
 def validate_programs(reference_dir):
@@ -411,6 +529,8 @@ def validate_programs(reference_dir):
     begin("Validating generated programs (%d .json program output files found)"
           % len(output_files))
 
+    failures = []
+
     # Iterate over all files
     for fj in output_files:
 
@@ -455,18 +575,24 @@ def validate_programs(reference_dir):
             log_error("Json output differs for %s, diff follows (generated first, reference second)"
                       % os.path.join(*reference_json_file.split(os.path.sep)[-3:]))
             print_recdiff(json_diff, printer=log_error)
+            failures.append(fj)
 
     end()
+    return failures
 
 
 def main(args):
     "Run all regression tests."
 
     # Check command-line arguments TODO: Use argparse
-    use_auto = "--skip-auto" not in args
+    only_auto  = "--only-auto" in args
+    use_auto   = "--skip-auto" not in args
+    use_tensor = "--skip-tensor" not in args
     use_uflacs = "--skip-uflacs" not in args
-    use_quad = "--skip-quad" not in args
-    use_ext_quad = "--ext-quad" in args
+    use_quad   = "--skip-quad" not in args
+    use_tsfc   = "--use-tsfc" in args
+    use_ext_quad   = "--ext-quad" in args
+    use_ext_uflacs = "--ext-uflacs" in args
 
     skip_download = "--skip-download" in args
     skip_run = "--skip-run" in args
@@ -474,15 +600,18 @@ def main(args):
     skip_validate = "--skip-validate" in args
     bench = "--bench" in args
 
-    permissive = "--permissive" in args
+    permissive = "--permissive" in args or bench
     tolerant = "--tolerant" in args
     print_timing = "--print-timing" in args
     show_help = "--help" in args
 
     flags = (
+        "--only-auto",
         "--skip-auto",
+        "--skip-tensor",
         "--skip-uflacs",
         "--skip-quad",
+        "--use-tsfc",
         "--ext-quad",
         "--skip-download",
         "--skip-run",
@@ -505,6 +634,8 @@ def main(args):
     if bench:
         skip_code_diff = True
         skip_validate = True
+    if use_ext_quad or use_ext_uflacs:
+        skip_code_diff = True
 
     # Extract .ufl names from args
     only_forms = set([arg for arg in args if arg.endswith(".ufl")])
@@ -534,19 +665,34 @@ def main(args):
 
     # Adjust which test cases (combinations of compile arguments) to run here
     test_cases = []
-    if use_auto:
+    if only_auto:
         test_cases += ["-r auto"]
-    if use_uflacs:
-        test_cases += ["-r uflacs"]
-    if use_quad:
-        test_cases += ["-r quadrature", "-r quadrature -O"]
-    if use_ext_quad:
-        test_cases += ext_quad
+    else:
+        if use_auto:
+            test_cases += ["-r auto"]
+        if use_tensor:
+            test_cases += ["-r tensor"]
+        if use_uflacs:
+            test_cases += ["-r uflacs -O0", "-r uflacs -O"]
+        if use_quad:
+            test_cases += ["-r quadrature -O0", "-r quadrature -O"]
+        if use_tsfc:
+            test_cases += ["-r tsfc -O0", "-r tsfc -O"]
+            # Silence good-performance messages by COFFEE
+            import coffee
+            coffee.set_log_level(coffee.logger.PERF_WARN)
+        if use_ext_quad:
+            test_cases += ext_quad
+        if use_ext_uflacs:
+            test_cases += ext_uflacs
 
     test_case_timings = {}
 
+    fails = OrderedDict()
+
     for argument in test_cases:
         test_case_timings[argument] = time.time()
+        fails[argument] = OrderedDict()
 
         begin("Running regression tests with %s" % argument)
 
@@ -555,10 +701,19 @@ def main(args):
         clean_output(sub_directory)
         os.chdir(sub_directory)
 
-        # Workarounds for partial feature completeness in uflacs
-        if "uflacs" in argument and not only_forms:
+        # Workarounds for feature lack in representation
+        if "quadrature" in argument and not only_forms:
+            skip_forms = known_quad_failures
+            info_blue("Skipping forms known to fail with quadrature:\n" + "\n".join(sorted(skip_forms)))
+        elif "tensor" in argument and not only_forms:
+            skip_forms = known_tensor_failures
+            info_blue("Skipping forms known to fail with tensor:\n" + "\n".join(sorted(skip_forms)))
+        elif "uflacs" in argument and not only_forms:
             skip_forms = known_uflacs_failures
             info_blue("Skipping forms known to fail with uflacs:\n" + "\n".join(sorted(skip_forms)))
+        elif "tsfc" in argument and not only_forms:
+            skip_forms = known_tsfc_failures
+            info_blue("Skipping forms known to fail with tsfc:\n" + "\n".join(sorted(skip_forms)))
         else:
             skip_forms = set()
 
@@ -566,7 +721,9 @@ def main(args):
         generate_test_cases(bench, only_forms, skip_forms)
 
         # Generate code
-        generate_code(args + argument.split(), only_forms, skip_forms)
+        failures = generate_code(args + argument.split(), only_forms, skip_forms)
+        if failures:
+            fails[argument]["generate_code"] = failures
 
         # Location of reference directories
         reference_directory = os.path.abspath("../../ffc-reference-data/")
@@ -579,23 +736,33 @@ def main(args):
 
         # Validate code by comparing to code generated with this set
         # of compiler parameters
-        if skip_code_diff or (argument in ext_quad):
+        if skip_code_diff:
             info_blue("Skipping code diff validation")
         else:
-            validate_code(code_reference_dir)
+            failures = validate_code(code_reference_dir)
+            if failures:
+                fails[argument]["validate_code"] = failures
 
         # Build and run programs and validate output to common
         # reference
         if skip_run:
             info_blue("Skipping program execution")
         else:
-            build_programs(bench, permissive)
-            run_programs(bench)
+            failures = build_programs(bench, permissive)
+            if failures:
+                fails[argument]["build_programs"] = failures
+
+            failures = run_programs(bench)
+            if failures:
+                fails[argument]["run_programs"] = failures
+
             # Validate output to common reference results
             if skip_validate:
                 info_blue("Skipping program output validation")
             else:
-                validate_programs(output_reference_dir)
+                failures = validate_programs(output_reference_dir)
+                if failures:
+                    fails[argument]["validate_programs"] = failures
 
         # Go back up
         os.chdir(os.path.pardir)
@@ -621,6 +788,15 @@ def main(args):
         return 0
     else:
         info_red("Regression tests failed")
+        for argument in test_cases:
+            if fails[argument]:
+                info_red("  Failures for %s:" % argument)
+            else:
+                info_green("  No failures for %s" % argument)
+            for phase, failures in fails[argument].items():
+                info_red("    %d failures in %s:" % (len(failures), phase))
+                for f in failures:
+                    info_red("      %s" % (f,))
         info_red("Error messages stored in %s" % logfile)
         return 1
 
diff --git a/test/regression/ufctest.h b/test/regression/ufctest.h
index b2184b2..6a91cd5 100644
--- a/test/regression/ufctest.h
+++ b/test/regression/ufctest.h
@@ -45,22 +45,28 @@ double time()
 }
 
 // Function for creating "random" vertex coordinates
-std::vector<double> test_coordinate_dofs(int gdim)
+std::vector<double> test_coordinate_dofs(std::size_t gdim, std::size_t gdeg)
 {
   // Generate some "random" coordinates
   std::vector<double> coordinate_dofs;
   if (gdim == 1)
   {
-    coordinate_dofs.resize(4);
+    coordinate_dofs.resize(2);
     coordinate_dofs[0]  = 0.903;
     coordinate_dofs[1]  = 0.561;
     // Huh? Only 2 vertices for interval, is this for tdim=1,gdim=2?
 //    coordinate_dofs[2]  = 0.987;
 //    coordinate_dofs[3]  = 0.123;
+    if (gdeg > 1)
+    {
+      assert(gdeg == 2);
+      coordinate_dofs.resize(3);
+      coordinate_dofs[2]  = 0.750;
+    }
   }
   else if (gdim == 2)
   {
-    coordinate_dofs.resize(8);
+    coordinate_dofs.resize(6);
     coordinate_dofs[0]  = 0.903;
     coordinate_dofs[1]  = 0.341;
     coordinate_dofs[2]  = 0.561;
@@ -70,6 +76,17 @@ std::vector<double> test_coordinate_dofs(int gdim)
     // Huh? Only 4 vertices for triangle, is this for quads?
 //    coordinate_dofs[6]  = 0.123;
 //    coordinate_dofs[7] = 0.561;
+    if (gdeg > 1)
+    {
+      assert(gdeg == 2);
+      coordinate_dofs.resize(12);
+      coordinate_dofs[6]  = 0.750;
+      coordinate_dofs[7]  = 0.901;
+      coordinate_dofs[8]  = 0.999;
+      coordinate_dofs[9]  = 0.500;
+      coordinate_dofs[10] = 0.659;
+      coordinate_dofs[11] = 0.555;
+    }
   }
   else if (gdim == 3)
   {
@@ -86,19 +103,27 @@ std::vector<double> test_coordinate_dofs(int gdim)
     coordinate_dofs[9]  = 0.123;
     coordinate_dofs[10] = 0.561;
     coordinate_dofs[11] = 0.667;
+    if (gdeg > 1)
+    {
+      assert(gdeg == 2);
+      coordinate_dofs.resize(30);
+      // FIXME: Add coordinates for a quadratic tetrahedron
+      assert(false);
+    }
   }
   return coordinate_dofs;
 }
 
 // Function for creating "random" vertex coordinates
-std::pair<std::vector<double>, std::vector<double>> test_coordinate_dof_pair(int gdim, int facet0, int facet1)
+std::pair<std::vector<double>, std::vector<double>> test_coordinate_dof_pair(int gdim, int gdeg, int facet0, int facet1)
 {
-  // For simplices only.
+  // For affine simplices only so far...
+  assert(gdeg == 1);
 
  // Return pair of cell coordinates where facet0 of cell 0 corresponds to facet1 of cell 1
   int num_vertices = gdim+1;
-  std::vector<double> c0 = test_coordinate_dofs(gdim);
-  std::vector<double> c1(gdim*num_vertices);
+  std::vector<double> c0 = test_coordinate_dofs(gdim, gdeg);
+  std::vector<double> c1(c0.size());
   std::vector<double> m(gdim);
 
   for (int i=0; i<gdim; ++i)
@@ -235,8 +260,9 @@ void test_finite_element(ufc::finite_element& element, int id, Printer& printer)
 
   // Prepare arguments
   test_cell c(element.cell_shape(), element.geometric_dimension());
+  // NOTE: Assuming geometry degree 1
   const std::vector<double> coordinate_dofs
-    = test_coordinate_dofs(element.geometric_dimension());
+    = test_coordinate_dofs(element.geometric_dimension(), 1);
   std::size_t value_size = 1;
   for (std::size_t i = 0; i < element.value_rank(); i++)
     value_size *= element.value_dimension(i);
@@ -448,6 +474,7 @@ void test_dofmap(ufc::dofmap& dofmap, ufc::shape cell_shape, int id,
 void test_cell_integral(ufc::cell_integral& integral,
                         ufc::shape cell_shape,
                         std::size_t gdim,
+                        std::size_t gdeg,
                         std::size_t tensor_size,
                         double** w,
                         bool bench,
@@ -458,7 +485,7 @@ void test_cell_integral(ufc::cell_integral& integral,
 
   // Prepare arguments
   test_cell c(cell_shape, gdim);
-  const std::vector<double> coordinate_dofs = test_coordinate_dofs(gdim);
+  const std::vector<double> coordinate_dofs = test_coordinate_dofs(gdim, gdeg);
   std::vector<double> A(tensor_size, 0.0);
 
   // Call tabulate_tensor
@@ -496,6 +523,7 @@ void test_cell_integral(ufc::cell_integral& integral,
 void test_exterior_facet_integral(ufc::exterior_facet_integral& integral,
                                   ufc::shape cell_shape,
                                   std::size_t gdim,
+                                  std::size_t gdeg,
                                   std::size_t tensor_size,
                                   double** w,
                                   bool bench,
@@ -506,7 +534,7 @@ void test_exterior_facet_integral(ufc::exterior_facet_integral& integral,
 
   // Prepare arguments
   test_cell c(cell_shape, gdim);
-  const std::vector<double> coordinate_dofs = test_coordinate_dofs(gdim);
+  const std::vector<double> coordinate_dofs = test_coordinate_dofs(gdim, gdeg);
   std::size_t num_facets = c.topological_dimension + 1;
   std::vector<double> A(tensor_size);
 
@@ -553,6 +581,7 @@ void test_exterior_facet_integral(ufc::exterior_facet_integral& integral,
 void test_interior_facet_integral(ufc::interior_facet_integral& integral,
                                   ufc::shape cell_shape,
                                   std::size_t gdim,
+                                  std::size_t gdeg,
                                   std::size_t macro_tensor_size,
                                   double** w,
                                   bool bench,
@@ -573,7 +602,7 @@ void test_interior_facet_integral(ufc::interior_facet_integral& integral,
     for (std::size_t facet1 = 0; facet1 < num_facets; facet1++)
     {
       const std::pair<std::vector<double>, std::vector<double>> coordinate_dofs
-        = test_coordinate_dof_pair(gdim, facet0, facet1);
+        = test_coordinate_dof_pair(gdim, gdeg, facet0, facet1);
 
       for(std::size_t i = 0; i < macro_tensor_size; i++)
         A[i] = 0.0;
@@ -593,7 +622,7 @@ void test_interior_facet_integral(ufc::interior_facet_integral& integral,
   if (bench)
   {
     const std::pair<std::vector<double>, std::vector<double>> coordinate_dofs
-      = test_coordinate_dof_pair(gdim, 0, 0);
+      = test_coordinate_dof_pair(gdim, gdeg, 0, 0);
 
     printer.begin("timing");
     for (std::size_t num_reps = initial_num_reps;; num_reps *= 2)
@@ -628,6 +657,7 @@ void test_interior_facet_integral(ufc::interior_facet_integral& integral,
 void test_vertex_integral(ufc::vertex_integral& integral,
                          ufc::shape cell_shape,
                          std::size_t gdim,
+                         std::size_t gdeg,
                          std::size_t tensor_size,
                          double** w,
                          bool bench,
@@ -638,7 +668,7 @@ void test_vertex_integral(ufc::vertex_integral& integral,
 
   // Prepare arguments
   test_cell c(cell_shape, gdim);
-  const std::vector<double> coordinate_dofs = test_coordinate_dofs(gdim);
+  const std::vector<double> coordinate_dofs = test_coordinate_dofs(gdim, gdeg);
   std::size_t num_vertices = c.topological_dimension + 1;
   std::vector<double> A(tensor_size);
 
@@ -720,6 +750,10 @@ void test_form(ufc::form& form, bool bench, int id, Printer & printer)
   std::unique_ptr<ufc::finite_element> element(form.create_coordinate_finite_element());
   ufc::shape cell_shape = element->cell_shape();
   std::size_t gdim = element->geometric_dimension();
+  std::size_t gdeg = element->degree();
+  assert(element->value_rank() == 1);
+  assert(element->value_dimension(0) == gdim);
+  assert(element->family() == std::string("Lagrange"));
   element.reset();
 
 // signature
@@ -781,7 +815,7 @@ void test_form(ufc::form& form, bool bench, int id, Printer & printer)
     printer.print_scalar("default_cell_integral", (bool)integral);
     if (integral)
     {
-      test_cell_integral(*integral, cell_shape, gdim,
+      test_cell_integral(*integral, cell_shape, gdim, gdeg,
                          tensor_size, w, bench, -1, printer);
     }
   }
@@ -790,7 +824,7 @@ void test_form(ufc::form& form, bool bench, int id, Printer & printer)
     std::unique_ptr<ufc::cell_integral> integral(form.create_cell_integral(i));
     if (integral)
     {
-      test_cell_integral(*integral, cell_shape, gdim,
+      test_cell_integral(*integral, cell_shape, gdim, gdeg,
                          tensor_size, w, bench, i, printer);
     }
   }
@@ -802,7 +836,7 @@ void test_form(ufc::form& form, bool bench, int id, Printer & printer)
     printer.print_scalar("default_exterior_facet_integral", (bool)integral);
     if (integral)
     {
-      test_exterior_facet_integral(*integral, cell_shape, gdim,
+      test_exterior_facet_integral(*integral, cell_shape, gdim, gdeg,
                                    tensor_size, w, bench, -1, printer);
     }
   }
@@ -813,7 +847,7 @@ void test_form(ufc::form& form, bool bench, int id, Printer & printer)
       integral(form.create_exterior_facet_integral(i));
     if (integral)
     {
-      test_exterior_facet_integral(*integral, cell_shape, gdim,
+      test_exterior_facet_integral(*integral, cell_shape, gdim, gdeg,
                                    tensor_size, w, bench, i, printer);
     }
   }
@@ -825,7 +859,7 @@ void test_form(ufc::form& form, bool bench, int id, Printer & printer)
     printer.print_scalar("default_interior_facet_integral", (bool)integral);
     if (integral)
     {
-      test_interior_facet_integral(*integral, cell_shape, gdim,
+      test_interior_facet_integral(*integral, cell_shape, gdim, gdeg,
                                    macro_tensor_size, w, bench, -1, printer);
     }
   }
@@ -835,7 +869,7 @@ void test_form(ufc::form& form, bool bench, int id, Printer & printer)
       integral(form.create_interior_facet_integral(i));
     if (integral)
     {
-      test_interior_facet_integral(*integral, cell_shape, gdim,
+      test_interior_facet_integral(*integral, cell_shape, gdim, gdeg,
                                    macro_tensor_size, w, bench, i, printer);
     }
   }
@@ -847,8 +881,8 @@ void test_form(ufc::form& form, bool bench, int id, Printer & printer)
     printer.print_scalar("default_vertex_integral", (bool)integral);
     if (integral)
     {
-      test_vertex_integral(*integral, cell_shape, gdim, tensor_size, w, bench,
-                          -1, printer);
+      test_vertex_integral(*integral, cell_shape, gdim, gdeg,
+                           tensor_size, w, bench, -1, printer);
     }
   }
   for (std::size_t i = 0; i < form.max_vertex_subdomain_id(); i++)
@@ -857,8 +891,8 @@ void test_form(ufc::form& form, bool bench, int id, Printer & printer)
       integral(form.create_vertex_integral(i));
     if (integral)
     {
-      test_vertex_integral(*integral, cell_shape, gdim, tensor_size, w, bench,
-                          i, printer);
+      test_vertex_integral(*integral, cell_shape, gdim, gdeg,
+                           tensor_size, w, bench, i, printer);
     }
   }
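
The ufctest.h changes thread the geometry degree (gdeg) through every integral test so quadratic coordinate elements get correctly sized coordinate dof vectors: for a simplex, degree 2 adds one dof per edge on top of the vertex dofs. A hedged Python sketch of the size arithmetic behind the resize() calls above (the helper name is made up for illustration):

    def num_coordinate_doubles(gdim, gdeg):
        "Length of the coordinate dof vector for a P1 or P2 simplex geometry."
        assert gdeg in (1, 2)
        vertices = gdim + 1              # simplex vertices
        edges = gdim * (gdim + 1) // 2   # simplex edges carry the P2 midpoint dofs
        dofs = vertices if gdeg == 1 else vertices + edges
        return gdim * dofs               # gdim doubles per coordinate dof

    assert num_coordinate_doubles(1, 1) == 2    # interval, affine
    assert num_coordinate_doubles(1, 2) == 3    # interval, quadratic
    assert num_coordinate_doubles(2, 2) == 12   # triangle, quadratic
    assert num_coordinate_doubles(3, 2) == 30   # tetrahedron, quadratic (still a FIXME above)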
 
diff --git a/test/uflacs/unit/test_cnodes.py b/test/uflacs/unit/test_cnodes.py
index 33943a9..9265515 100644
--- a/test/uflacs/unit/test_cnodes.py
+++ b/test/uflacs/unit/test_cnodes.py
@@ -48,13 +48,16 @@ def test_cnode_expressions():
     # FlattenedArray
     n = Symbol("n")
     decl = ArrayDecl("double", A, (4,))
-    assert str(FlattenedArray(decl, strides=(2,), offset=3)[0]) == "A[3 + 2 * 0]"
-    assert str(FlattenedArray(decl, strides=(2,))[0]) == "A[2 * 0]"
+    assert str(FlattenedArray(decl, strides=(2,), offset=3)[0]) == "A[3]"  # "A[3 + 2 * 0]"
+    assert str(FlattenedArray(decl, strides=(2,))[0]) == "A[0]"  # "A[2 * 0]"
     decl = ArrayDecl("double", A, (2, 3, 4))
     flattened = FlattenedArray(decl, strides=(7, 8 * n, n - 1))
-    assert str(flattened[0, n, n * 7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
-    assert str(flattened[0, n][n * 7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
-    assert str(flattened[0][n][n * 7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
+    #assert str(flattened[0, n, n * 7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
+    #assert str(flattened[0, n][n * 7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
+    #assert str(flattened[0][n][n * 7]) == "A[7 * 0 + 8 * n * n + (n - 1) * (n * 7)]"
+    assert str(flattened[0, n, n * 7]) == "A[8 * n * n + (n - 1) * (n * 7)]"
+    assert str(flattened[0, n][n * 7]) == "A[8 * n * n + (n - 1) * (n * 7)]"
+    assert str(flattened[0][n][n * 7]) == "A[8 * n * n + (n - 1) * (n * 7)]"
 
     # Unary operators
     assert str(Pos(1)) == "+1"
@@ -251,7 +254,7 @@ def test_cnode_loop_statements():
 
     # Using assigns as both statements and expressions
     assert str(While(LT(AssignAdd("x", 4.0), 17.0), AssignAdd("A", "y"))) == "while ((x += 4.0) < 17.0)\n{\n    A += y;\n}"
-    assert str(ForRange("i", 3, 7, AssignAdd("A", "i"))) == "for (int i = 3; i < 7; ++i)\n{\n    A += i;\n}"
+    assert str(ForRange("i", 3, 7, AssignAdd("A", "i"))) == "for (int i = 3; i < 7; ++i)\n    A += i;"
 
 
 def test_cnode_loop_helpers():
@@ -260,12 +263,21 @@ def test_cnode_loop_helpers():
     A = Symbol("A")
     B = Symbol("B")
     C = Symbol("C")
-    src = A[i + 4 * j]
-    dst = 2.0 * B[j] * C[i]
+    dst = A[i + 4 * j]
+    src = 2.0 * B[j] * C[i]
     ranges = [(i, 0, 2), (j, 1, 3)]
-    assert str(assign_loop(src, dst, ranges)) == "for (int i = 0; i < 2; ++i)\n{\n    for (int j = 1; j < 3; ++j)\n    {\n        A[i + 4 * j] = 2.0 * B[j] * C[i];\n    }\n}"
-    assert str(scale_loop(src, dst, ranges)) == "for (int i = 0; i < 2; ++i)\n{\n    for (int j = 1; j < 3; ++j)\n    {\n        A[i + 4 * j] *= 2.0 * B[j] * C[i];\n    }\n}"
-    assert str(accumulate_loop(src, dst, ranges)) == "for (int i = 0; i < 2; ++i)\n{\n    for (int j = 1; j < 3; ++j)\n    {\n        A[i + 4 * j] += 2.0 * B[j] * C[i];\n    }\n}"
+    assert str(assign_loop(dst, src, ranges)) == """\
+for (int i = 0; i < 2; ++i)
+    for (int j = 1; j < 3; ++j)
+        A[i + 4 * j] = 2.0 * B[j] * C[i];"""
+    assert str(scale_loop(dst, src, ranges)) == """\
+for (int i = 0; i < 2; ++i)
+    for (int j = 1; j < 3; ++j)
+        A[i + 4 * j] *= 2.0 * B[j] * C[i];"""
+    assert str(accumulate_loop(dst, src, ranges)) == """\
+for (int i = 0; i < 2; ++i)
+    for (int j = 1; j < 3; ++j)
+        A[i + 4 * j] += 2.0 * B[j] * C[i];"""
 
 
 def test_cnode_switch_statements():
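
Two CNodes formatting changes drive the updated expectations in test_cnodes.py: flattened array indices now fold away terms with a literal zero index (so "A[3 + 2 * 0]" becomes "A[3]"), and single-statement loop bodies are emitted without braces. The hunk also swaps the assign_loop/scale_loop/accumulate_loop argument order to (dst, src). A toy Python model of the index folding only, not the CNodes implementation itself:

    def flat_index(indices, strides, offset=0):
        "Build a flattened index string, dropping terms with a literal 0 index."
        terms = [str(offset)] if offset else []
        for i, s in zip(indices, strides):
            if i == 0:  # a constant zero index contributes nothing
                continue
            terms.append("%s * %s" % (s, i))
        return " + ".join(terms) if terms else "0"

    assert flat_index((0,), (2,), offset=3) == "3"       # was "3 + 2 * 0"
    assert flat_index((0,), (2,)) == "0"                 # was "2 * 0"
    assert flat_index((0, "n"), (7, "8 * n")) == "8 * n * n"
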
diff --git a/test/uflacs/unit/test_factorization.py b/test/uflacs/unit/test_factorization.py
index 64af1d2..bf40603 100644
--- a/test/uflacs/unit/test_factorization.py
+++ b/test/uflacs/unit/test_factorization.py
@@ -34,11 +34,12 @@ def test_compute_argument_factorization():
     v = TestFunction(V)
     a, b, c, d, e, f, g = [Coefficient(V, count=k) for k in range(7)]
 
+    zero = as_ufl(0.0)
     one = as_ufl(1.0)
     two = as_ufl(2)
 
     # Test workaround for hack in factorization:
-    FVpre = [two]
+    FVpre = [zero, one, two]
     offset = len(FVpre)
 
     # Test basic non-argument terminal
@@ -69,7 +70,7 @@ def test_compute_argument_factorization():
     SV = [v]
     SV_deps = [()]
     AV = [v]
-    FV = FVpre + [one]
+    FV = FVpre + []
     IM = {(0,): 1}  # v == AV[0] * FV[1]
     compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
@@ -77,7 +78,7 @@ def test_compute_argument_factorization():
     SV = [f, v, f * v]
     SV_deps = [(), (), (0, 1)]
     AV = [v]
-    FV = FVpre + [f, one]  # TODO: Why is one at the end here?
+    FV = FVpre + [f]
     IM = {(0,): offset}  # f*v == AV[0] * FV[1]
     compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
@@ -85,7 +86,7 @@ def test_compute_argument_factorization():
     SV = [u, v, u * v]
     SV_deps = [(), (), (0, 1)]
     AV = [v, u]  # Test function < trial function
-    FV = FVpre + [one]
+    FV = FVpre + []
     IM = {(0, 1): 1}  # v*u == (AV[0] * AV[1]) * FV[1]
     compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
@@ -93,8 +94,8 @@ def test_compute_argument_factorization():
     SV = [u, f, v, (f * v), u * (f * v)]
     SV_deps = [(), (), (), (1, 2), (0, 3)]
     AV = [v, u]
-    FV = FVpre + [one, f]
-    IM = {(0, 1): 1 + offset}  # f*(u*v) == (AV[0] * AV[1]) * FV[2]
+    FV = FVpre + [f]
+    IM = {(0, 1): 0 + offset}  # f*(u*v) == (AV[0] * AV[1]) * FV[0 + offset]
     compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
 
     # Test more complex situation
@@ -117,13 +118,13 @@ def test_compute_argument_factorization():
                     (13, 14),
                     ]
     AV = [v, u, u.dx(0)]
-    FV = FVpre + [one] + [a, b, c, d, e,  # 0..5
-                          c + d,  # 6, introduced by SV[13]
-                          e * a,  # 7, introduced by SV[14]
-                          e * b,  # 8, introduced by SV[14]
-                          (e * a) * (c + d),  # 9
-                          (e * b) * (c + d),  # 10
-                          ]
-    IM = {(0, 1): 9 + offset,  # (a*e)*(c+d)*(u*v) == (AV[0] * AV[2]) * FV[13]
-          (0, 2): 10 + offset}  # (b*e)*(c+d)*(u.dx(0)*v) == (AV[1] * AV[2]) * FV[12]
+    FV = FVpre + [a, b, c, d, e,  # 0..4
+                  c + d,  # 5, introduced by SV[13]
+                  e * a,  # 6, introduced by SV[14]
+                  e * b,  # 7, introduced by SV[14]
+                  (e * a) * (c + d),  # 8
+                  (e * b) * (c + d),  # 9
+                  ]
+    IM = {(0, 1): 8 + offset,  # (a*e)*(c+d)*(u*v) == (AV[0] * AV[1]) * FV[8 + offset]
+          (0, 2): 9 + offset}  # (b*e)*(c+d)*(u.dx(0)*v) == (AV[0] * AV[2]) * FV[9 + offset]
     compare_compute_argument_factorization(SV, SV_deps, AV, FV, IM)
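
The factorization expectations change because the factor vector is now seeded with [zero, one, two] up front: trivial factors reuse the seeded one at index 1 instead of appending a fresh one, and every factor appended after the seed lands at offset plus its local position. A small sketch of that index bookkeeping, with strings standing in for UFL expressions:

    FVpre = ["zero", "one", "two"]  # seeded constants
    offset = len(FVpre)             # == 3

    # A coefficient factor appended after the seed:
    FV = FVpre + ["f"]
    assert FV[0 + offset] == "f"    # IM value 0 + offset points at f

    # A purely trivial factor maps to the seeded one; nothing is appended:
    FV = FVpre + []
    assert FV[1] == "one"           # IM value 1 points at the seeded one
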
diff --git a/test/uflacs/unit/test_format_code_structure.py b/test/uflacs/unit/test_format_code_structure.py
index c063e5d..2858dc9 100644
--- a/test/uflacs/unit/test_format_code_structure.py
+++ b/test/uflacs/unit/test_format_code_structure.py
@@ -246,12 +246,12 @@ def test_while_loop():
 
 
 def test_for_loop():
-    code = For("int i = 0;", "i < 3", "++i", [])
+    code = For("int i = 0", "i < 3", "++i", [])
     actual = str(code)
     expected = "for (int i = 0; i < 3; ++i)\n{\n}"
     assert actual == expected
 
-    code = For("int i = 0;", "i < 3", "++i", body=["ting;", "tang;"])
+    code = For("int i = 0", "i < 3", "++i", body=["ting;", "tang;"])
     actual = str(code)
     expected = "for (int i = 0; i < 3; ++i)\n{\n    ting;\n    tang;\n}"
     assert actual == expected
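
For() in the format_code_structure tests now takes the initializer without a trailing semicolon; the formatter supplies the separators itself. A toy formatter showing the convention (illustrative, not the actual implementation):

    def format_for(init, check, update, body):
        "Emit a C-style for loop; the ';' separators are added here, not by callers."
        header = "for (%s; %s; %s)" % (init, check, update)
        lines = ["    %s" % stmt for stmt in body]
        return "\n".join([header, "{"] + lines + ["}"])

    assert format_for("int i = 0", "i < 3", "++i", []) \
        == "for (int i = 0; i < 3; ++i)\n{\n}"
    assert format_for("int i = 0", "i < 3", "++i", ["ting;", "tang;"]) \
        == "for (int i = 0; i < 3; ++i)\n{\n    ting;\n    tang;\n}"
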
diff --git a/test/uflacs/unit/test_graph_algorithm.py b/test/uflacs/unit/test_graph_algorithm.py
index b512c54..2559169 100644
--- a/test/uflacs/unit/test_graph_algorithm.py
+++ b/test/uflacs/unit/test_graph_algorithm.py
@@ -13,7 +13,7 @@ from ufl.permutation import compute_indices
 from ffc.uflacs.analysis.graph import build_graph
 from ffc.uflacs.analysis.graph_rebuild import rebuild_expression_from_graph
 # from ffc.uflacs.analysis.graph_rebuild import rebuild_scalar_e2i
-# from ffc.uflacs.analysis.graph_dependencies import (compute_dependencies,
+# from ffc.uflacs.analysis.dependencies import (compute_dependencies,
 #                                                mark_active,
 #                                                mark_image)
 # from ffc.uflacs.analysis.graph_ssa import (mark_partitions,
diff --git a/test/uflacs/unit/test_snippets.py b/test/uflacs/unit/test_snippets.py
index 6859310..9b57b71 100644
--- a/test/uflacs/unit/test_snippets.py
+++ b/test/uflacs/unit/test_snippets.py
@@ -1,27 +1,45 @@
 # -*- coding: utf-8 -*-
 
-from ffc.uflacs.language.format_value import format_float, set_float_precision, reset_float_precision
+from ffc.uflacs.language.format_value import format_float
 from ffc.uflacs.language.format_lines import iter_indented_lines, Indented, format_indented_lines
 
 
 def test_format_float():
-    reset_float_precision()
-    assert format_float(0.0) == "0.0"
-    assert format_float(1.0) == "1.0"
-    assert format_float(12.) == "12.0"
-
-    set_float_precision(3)
-    assert format_float(0.0) == "0.0"
-    assert format_float(1.0) == "1.0"
-    assert format_float(1.2) == "1.2"
-    assert format_float(1.23) == "1.23"
-
-    set_float_precision(15, 1e-15)
-    assert format_float(0.0) == "0.0"
-    assert format_float(1.0) == "1.0"
-    assert format_float(12.) == "12.0"  # 1.2e+01
-
-    reset_float_precision()
+    # Ints handled
+    assert format_float(0, 3) == "0.0"
+    assert format_float(1, 3) == "1.0"
+    assert format_float(12, 15) == "12.0"
+
+    # Zeros simple
+    assert format_float(0.0, 0) == "0.0"
+    assert format_float(0.0, 3) == "0.0"
+    assert format_float(0.0, 15) == "0.0"
+
+    # Ones simple
+    assert format_float(1.0, 0) == "1.0"
+    assert format_float(1.0, 3) == "1.0"
+    assert format_float(1.0, 15) == "1.0"
+
+    # Small ints simple
+    assert format_float(12., 15) == "12.0"
+
+    # Precision truncates
+    assert format_float(1.2, 3) == "1.2"
+    assert format_float(1.23, 3) == "1.23"
+    assert format_float(1.2345, 3) == "1.23"
+    assert format_float(1.0 + 1e-5, 7) == "1.00001"
+    assert format_float(1.0 + 1e-5, 6) == "1.00001"
+    assert format_float(1.0 + 1e-5, 5) == "1.0"
+
+    # Cleanly formatted exponential numbers
+    # without superfluous '+' signs and leading zeros in the exponent
+    assert format_float(1234567.0, 3) == "1.23e6"
+    assert format_float(1.23e6, 3) == "1.23e6"
+    assert format_float(1.23e-6, 3) == "1.23e-6"
+    assert format_float(-1.23e-6, 3) == "-1.23e-6"
+    assert format_float(-1.23e6, 3) == "-1.23e6"
+
 
 
 def test_iter_indented_lines():
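
format_float has moved from module-level set_float_precision/reset_float_precision state to an explicit precision argument on every call. A rough stateless model of the rules the new tests pin down; it covers only the cases asserted below, and unlike the real formatter it keeps Python's default exponent spelling (for example "1.23e-06" rather than "1.23e-6"):

    def format_float_sketch(x, precision):
        "Format x at the given precision; integral values print as 'N.0'."
        if float(x) == int(x) and abs(float(x)) < 1e15:
            return repr(float(x))           # 0 -> "0.0", 12 -> "12.0"
        return "%.*g" % (precision, x)      # precision truncates: 1.2345 -> "1.23"

    assert format_float_sketch(12, 15) == "12.0"
    assert format_float_sketch(0.0, 3) == "0.0"
    assert format_float_sketch(1.2345, 3) == "1.23"
    assert format_float_sketch(1.23e-6, 3) == "1.23e-06"  # real formatter: "1.23e-6"
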
diff --git a/test/uflacs/unit/test_ssa_manipulations.py b/test/uflacs/unit/test_ssa_manipulations.py
index e33d23c..4c53d2c 100644
--- a/test/uflacs/unit/test_ssa_manipulations.py
+++ b/test/uflacs/unit/test_ssa_manipulations.py
@@ -22,7 +22,7 @@ from ffc.uflacs.analysis.graph import build_graph
 #                                        map_component_tensor_arg_components)
 # from ffc.uflacs.analysis.graph_symbols import (map_list_tensor_symbols,
 #                                             map_transposed_symbols, get_node_symbols)
-# from ffc.uflacs.analysis.graph_dependencies import (compute_dependencies,
+# from ffc.uflacs.analysis.dependencies import (compute_dependencies,
 #                                                mark_active,
 #                                                mark_image)
 # from ffc.uflacs.analysis.graph_ssa import (mark_partitions,
diff --git a/test/uflacs/unit/test_table_utils.py b/test/uflacs/unit/test_table_utils.py
index 4746791..3abdf1f 100644
--- a/test/uflacs/unit/test_table_utils.py
+++ b/test/uflacs/unit/test_table_utils.py
@@ -8,11 +8,19 @@ from __future__ import print_function
 from ufl import triangle
 from six import itervalues, iteritems
 from six.moves import xrange as range
-from ffc.uflacs.elementtables.table_utils import equal_tables, strip_table_zeros, build_unique_tables, get_ffc_table_values
+from ffc.uflacs.elementtables import equal_tables, strip_table_zeros, build_unique_tables, get_ffc_table_values
 
 import numpy as np
 default_tolerance = 1e-14
 
+
+def old_strip_table_zeros(table, eps):
+    "Quick fix translating new version to old behaviour to keep tests without modification."
+    dofrange, dofmap, stripped_table = strip_table_zeros(table, False, eps)
+    begin, end = dofrange
+    return begin, end, stripped_table
+
+
 def test_equal_tables():
     a = np.zeros((2, 3))
     b = np.zeros((2, 3))
@@ -35,43 +43,44 @@ def test_equal_tables():
     a = np.ones((2, 3))*(1.0+eps)
     assert equal_tables(a, np.ones(a.shape), 10*eps)
 
+
 def test_strip_table_zeros():
     # Can strip entire table:
     a = np.zeros((2, 3))
     e = np.zeros((2, 0))
-    begin, end, b = strip_table_zeros(a, default_tolerance)
-    assert begin == a.shape[-1]
-    assert end == a.shape[-1]
+    begin, end, b = old_strip_table_zeros(a, default_tolerance)
     assert begin == end # This is a way to check for all-zero table
+    assert begin == 0
+    assert end == 0
     assert equal_tables(b, e, default_tolerance)
 
     # Can keep entire nonzero table:
     a = np.ones((2, 3))
     e = np.ones((2, 3))
-    begin, end, b = strip_table_zeros(a, default_tolerance)
+    begin, end, b = old_strip_table_zeros(a, default_tolerance)
+    assert begin != end
     assert begin == 0
     assert end == a.shape[-1]
-    assert begin != end
     assert equal_tables(b, e, default_tolerance)
 
     # Can strip one left side column:
     a = np.ones((2, 3))
     a[:, 0] = 0.0
     e = np.ones((2, 2))
-    begin, end, b = strip_table_zeros(a, default_tolerance)
+    begin, end, b = old_strip_table_zeros(a, default_tolerance)
+    assert begin != end
     assert begin == 1
     assert end == a.shape[-1]
-    assert begin != end
     assert equal_tables(b, e, default_tolerance)
 
     # Can strip one right side column:
     a = np.ones((2, 3))
     a[:, 2] = 0.0
     e = np.ones((2, 2))
-    begin, end, b = strip_table_zeros(a, default_tolerance)
+    begin, end, b = old_strip_table_zeros(a, default_tolerance)
+    assert begin != end
     assert begin == 0
     assert end == a.shape[-1]-1
-    assert begin != end
     assert equal_tables(b, e, default_tolerance)
 
     # Can strip two columns on each side:
@@ -81,10 +90,10 @@ def test_strip_table_zeros():
     a[:, 3] = 0.0
     a[:, 4] = 0.0
     e = np.ones((2, 1))
-    begin, end, b = strip_table_zeros(a, default_tolerance)
+    begin, end, b = old_strip_table_zeros(a, default_tolerance)
+    assert begin != end
     assert begin == 2
     assert end == a.shape[-1]-2
-    assert begin != end
     assert equal_tables(b, e, default_tolerance)
 
     # Can strip two columns on each side of rank 1 table:
@@ -94,10 +103,10 @@ def test_strip_table_zeros():
     a[..., 3] = 0.0
     a[..., 4] = 0.0
     e = np.ones((1,))
-    begin, end, b = strip_table_zeros(a, default_tolerance)
+    begin, end, b = old_strip_table_zeros(a, default_tolerance)
+    assert begin != end
     assert begin == 2
     assert end == a.shape[-1]-2
-    assert begin != end
     assert equal_tables(b, e, default_tolerance)
 
     # Can strip two columns on each side of rank 3 table:
@@ -107,12 +116,31 @@ def test_strip_table_zeros():
     a[..., 3] = 0.0
     a[..., 4] = 0.0
     e = np.ones((3, 2, 1))
-    begin, end, b = strip_table_zeros(a, default_tolerance)
+    begin, end, b = old_strip_table_zeros(a, default_tolerance)
+    assert begin != end
     assert begin == 2
     assert end == a.shape[-1]-2
+    assert equal_tables(b, e, default_tolerance)
+
+    # Can strip columns on each side and compress the one in the middle:
+    a = np.ones((2, 5))
+    a[:, 0] = 0.0
+    a[:, 1] = 1.0
+    a[:, 2] = 0.0
+    a[:, 3] = 3.0
+    a[:, 4] = 0.0
+    e = np.ones((2, 2))
+    e[:, 0] = 1.0
+    e[:, 1] = 3.0
+    dofrange, dofmap, b = strip_table_zeros(a, True, default_tolerance)
+    begin, end = dofrange
     assert begin != end
+    assert begin == 1
+    assert end == a.shape[-1] - 1
+    assert list(dofmap) == [1, 3]
     assert equal_tables(b, e, default_tolerance)
 
+
 def test_unique_tables_some_equal():
     tables = [
         np.zeros((2,)),
@@ -137,6 +165,7 @@ def test_unique_tables_some_equal():
     for i, t in enumerate(tables):
         assert equal_tables(t, unique[mapping[i]], default_tolerance)
 
+
 def test_unique_tables_all_equal():
     tables = [np.ones((3, 5))*2.0]*6
     unique, mapping = build_unique_tables(tables, default_tolerance)
@@ -191,8 +220,10 @@ def xtest_get_ffc_table_values_scalar_cell():
     cell = triangle
     integral_type = "cell"
     entitytype = "cell"
+
     class MockElement:
         def value_shape(self): return ()
+
     element = MockElement()
     component = ()
 
@@ -216,7 +247,7 @@ def xtest_get_ffc_table_values_scalar_cell():
                 }
                 table = get_ffc_table_values(ffc_tables,
                     cell, integral_type,
-                    num_points, element, avg,
+                    element, avg,
                     entitytype, derivatives, component, 
                     default_tolerance)
                 assert equal_tables(table[0, ...], np.transpose(arr), default_tolerance)
@@ -227,8 +258,10 @@ def xtest_get_ffc_table_values_vector_facet():
     integral_type = "exterior_facet"
     entitytype = "facet"
     num_entities = 3
+
     class MockElement:
         def value_shape(self): return (2,)
+
     element = MockElement()
     num_components = 2
 
@@ -264,7 +297,7 @@ def xtest_get_ffc_table_values_vector_facet():
                 for component in range(num_components):
                     table = get_ffc_table_values(ffc_tables,
                         cell, integral_type,
-                        num_points, element, avg,
+                        element, avg,
                         entitytype, derivatives, component,
                         default_tolerance)
                     for i in range(num_entities):
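
strip_table_zeros changed signature: it now returns a (begin, end) dofrange, a dofmap listing the surviving columns, and the stripped table, and with the compression flag set it also drops all-zero interior columns (hence the old_strip_table_zeros adapter above). A hedged numpy sketch of that behaviour, matching the assertions in the new middle-column test:

    import numpy as np

    def strip_table_zeros_sketch(table, compress, eps):
        "Return ((begin, end), dofmap, stripped) over the last table axis."
        nonzero = [j for j in range(table.shape[-1])
                   if np.abs(table[..., j]).max() > eps]
        if not nonzero:  # all-zero table: begin == end == 0
            return (0, 0), [], table[..., :0]
        begin, end = nonzero[0], nonzero[-1] + 1
        dofmap = nonzero if compress else list(range(begin, end))
        return (begin, end), dofmap, table[..., dofmap]

    a = np.array([[0., 1., 0., 3., 0.],
                  [0., 1., 0., 3., 0.]])
    (begin, end), dofmap, b = strip_table_zeros_sketch(a, True, 1e-14)
    assert (begin, end) == (1, 4) and list(dofmap) == [1, 3]
    assert np.array_equal(b, [[1., 3.], [1., 3.]])
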
diff --git a/test/uflacs/unit/test_ufc_backend.py b/test/uflacs/unit/test_ufc_backend.py
index dc4da82..cb4fd6d 100644
--- a/test/uflacs/unit/test_ufc_backend.py
+++ b/test/uflacs/unit/test_ufc_backend.py
@@ -313,6 +313,7 @@ def test_debug_by_printing_extracted_function():
     print(extract_function(name, cpp))
     print("/// end")
 
+
 """
 Missing:
 
diff --git a/test/unit/symbolics/test_float.py b/test/unit/symbolics/test_float.py
index 5523165..678c909 100755
--- a/test/unit/symbolics/test_float.py
+++ b/test/unit/symbolics/test_float.py
@@ -36,10 +36,11 @@ def testFloat():
 
     assert repr(f0) == "FloatValue(%s)" % format["float"](1.5)
     assert repr(f1) == "FloatValue(%s)" % format["float"](-5)
-    assert repr(f2) == "FloatValue(%s)" % format["float"](0)
+    # This depends on the chosen precision and certain rounding behaviour in code generation:
+    #assert repr(f2) == "FloatValue(%s)" % format["float"](0.0)
     assert repr(f3) == "FloatValue(%s)" % format["float"](-1e-11)
 
-    assert f2.val == 0
+    #assert f2.val == 0  # This test documents incorrect float behaviour of the quadrature representation
     assert not f3.val == 0
 
     assert f0.ops() == 0
@@ -50,7 +51,9 @@ def testFloat():
     assert f0 == f4
     assert f1 != f3
     assert not f0 < f1
-    assert f2 > f3
+    #assert f2 > f3
+    # ^^^ NB! The > operator of FloatValue is not a numeric comparison;
+    # it does something else entirely and is affected by precision in indirect ways.
 
     # Test hash
     l = [f0]
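
The disabled assertions in test_float.py document that a FloatValue's repr goes through the code-generation float formatter, so the printed value of a tiny number depends on the configured precision. A toy illustration of that sensitivity (not FFC's actual formatter or thresholds):

    def fmt(x, precision):
        "Toy formatter: values below the precision threshold collapse to 0."
        if abs(x) < 0.5 * 10.0 ** (-precision):
            return "0"
        return "%.*g" % (precision, x)

    assert fmt(1e-11, 15) == "1e-11"  # survives at high precision
    assert fmt(1e-11, 3) == "0"       # rounds away at low precision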

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/fenics/ffc.git


