[h5py] 01/01: Imported Upstream version 2.6.0

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Tue Feb 2 10:37:48 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch upstream/latest
in repository h5py.

commit af0f1fef399ddd9c4700eeb74d93a685f2dc89f6
Author: Ghislain Antony Vaillant <ghisvail at gmail.com>
Date:   Tue Feb 2 10:23:19 2016 +0000

    Imported Upstream version 2.6.0
---
 .gitignore                            |   1 +
 .travis.yml                           |  45 ++--
 MANIFEST.in                           |   2 +
 README.rst                            |   2 +-
 api_gen.py                            |   1 +
 docs/build.rst                        |  98 +++------
 docs/conf.py                          |   4 +-
 docs/faq.rst                          |   9 +-
 docs/high/attr.rst                    |   4 +-
 docs/high/dataset.rst                 |   4 +-
 docs/high/dims.rst                    |   4 +-
 docs/high/file.rst                    |   4 +-
 docs/high/group.rst                   |   4 +-
 docs/index.rst                        |  56 ++---
 docs/low.rst                          |   4 -
 docs/mpi.rst                          |   5 +-
 docs/quick.rst                        |  15 +-
 docs_api/automod.py                   |  12 +-
 examples/collective_io.py             |  59 ++++++
 h5py/__init__.py                      |  68 +++---
 h5py/_hl/__init__.py                  |   7 +
 h5py/_hl/attrs.py                     |  18 +-
 h5py/_hl/base.py                      |  84 +++++---
 h5py/_hl/dataset.py                   |  94 ++++++---
 h5py/_hl/datatype.py                  |   6 +-
 h5py/_hl/dims.py                      |  71 +++++--
 h5py/_hl/files.py                     |  32 ++-
 h5py/_hl/filters.py                   |  28 ++-
 h5py/_hl/group.py                     |  33 ++-
 h5py/_hl/selections.py                |  90 +-------
 h5py/_hl/selections2.py               |  13 +-
 h5py/_objects.pyx                     |   7 +
 h5py/api_functions.txt                |  46 +++--
 h5py/api_types_hdf5.pxd               |  31 ++-
 h5py/h5.pyx                           |  15 +-
 h5py/h5d.pyx                          |  34 +--
 h5py/h5fd.pxd                         |   3 +-
 h5py/h5fd.pyx                         |   5 +
 h5py/h5p.pyx                          | 189 ++++++++++++++++-
 h5py/h5s.pyx                          |  50 ++++-
 h5py/h5t.pyx                          |  22 +-
 h5py/highlevel.py                     |   9 +
 h5py/ipy_completer.py                 |  63 +++---
 h5py/tests/hl/__init__.py             |   8 +-
 h5py/tests/hl/test_dataset_getitem.py |  58 ++++++
 h5py/tests/hl/test_datatype.py        |  45 ++++
 h5py/tests/hl/test_threads.py         |  61 ++++++
 h5py/tests/old/test_dataset.py        |  16 ++
 h5py/version.py                       |  12 +-
 pylintrc                              | 377 ++++++++++++++++++++++++++++++++++
 setup.py                              |  17 +-
 setup_build.py                        |  48 +++--
 tox.ini                               |  20 +-
 53 files changed, 1507 insertions(+), 506 deletions(-)

diff --git a/.gitignore b/.gitignore
index 4bc92a9..b1ea6b5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,4 @@ MANIFEST
 /.tox
 /h5py.egg-info
 /*.egg
+.eggs/
diff --git a/.travis.yml b/.travis.yml
index f5d6d1c..2e9460f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,21 +3,36 @@ language: python
 notifications:
   email: false
 
-python:
-  - "2.6"
-  - "2.7"
-  - "3.2"
-  - "3.3"
-  - "3.4"
+addons:
+  apt:
+    packages:
+      - libhdf5-serial-dev
 
-before_install:
-    - sudo apt-get update -qq
-    - sudo apt-get install -qq libhdf5-serial-dev
-    - if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then pip install unittest2; fi
-    - pip install numpy
-    - pip install --install-option="--no-cython-compile" cython
+sudo: false
 
-install: 
-    - python setup.py build -f
+cache:
+  directories:
+    - $HOME/.cache/pip
 
-script: "python setup.py test"
+env:
+# Commented out since setup_requires is controlled by easy_install
+# This should be uncommented when pip can use setup_requires
+#  - TOXENV=py26-test-nodeps
+#  - TOXENV=py27-test-nodeps
+#  - TOXENV=py32-test-nodeps
+#  - TOXENV=py33-test-nodeps
+#  - TOXENV=py34-test-nodeps
+  - TOXENV=py26-test-deps
+  - TOXENV=py27-test-deps
+  - TOXENV=py32-test-deps
+  - TOXENV=py33-test-deps
+  - TOXENV=py34-test-deps
+  - TOXENV=py27-pylint-deps
+  - TOXENV=py33-pylint-deps
+  - TOXENV=py34-pylint-deps
+
+install:
+  - pip install tox
+
+script:
+  - tox
diff --git a/MANIFEST.in b/MANIFEST.in
index d7a23d2..3c76717 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -13,5 +13,7 @@ include setup_configure.py
 include ANN.rst
 include README.rst
 recursive-include docs *
+prune docs/_build
 recursive-include docs_api *
+prune docs_api/_build
 recursive-exclude * .DS_Store
diff --git a/README.rst b/README.rst
index 50a3b80..0e4d106 100644
--- a/README.rst
+++ b/README.rst
@@ -25,7 +25,7 @@ You need, at a minimum:
 To build on UNIX:
 
 * HDF5 1.8.4 or later (on Windows, HDF5 comes with h5py)
-* Cython 0.17 or later
+* Cython 0.19 or later
 * If using Python 2.6, unittest2 is needed to run the tests
 
 Installing on Windows
diff --git a/api_gen.py b/api_gen.py
index e379f9c..b4bd0d5 100644
--- a/api_gen.py
+++ b/api_gen.py
@@ -217,6 +217,7 @@ class LineProcessor(object):
         imp = """\
 cdef {0.code} {0.fname}({0.sig}) except *:
     cdef {0.code} r
+    _hdf5.H5Eset_auto(NULL, NULL)
     r = _hdf5.{0.fname}({0.args})
     if r{condition}:
         if set_exception():
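
With automatic HDF5 error printing disabled in the generated wrappers, failed C calls surface only as Python exceptions. A minimal sketch of the visible effect (the filename is made up)::

    import h5py

    try:
        h5py.File('does_not_exist.h5', 'r')
    except IOError as e:
        # The C library no longer dumps its error stack to stderr;
        # the failure is reported solely through this exception.
        print('open failed: %s' % e)
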
diff --git a/docs/build.rst b/docs/build.rst
index f626b45..d1ea4b2 100644
--- a/docs/build.rst
+++ b/docs/build.rst
@@ -3,90 +3,55 @@
 Installation
 ============
 
+Pre-configured installation (recommended)
+-----------------------------------------
 
-For Python beginners
---------------------
+It's strongly recommended that you use a Python distribution or package
+manager to install h5py along with its compiled dependencies.  Here are some
+which are popular in the Python community:
 
-It can be a pain to install NumPy, HDF5, h5py, Cython and other dependencies.
-If you're just starting out, by far the easiest approach is to install h5py via
-your package manager (``apt-get`` or similar), or by using one of the major
-science-oriented Python distributions:
-
-* `Anaconda <http://continuum.io/downloads>`_ (Linux, Mac, Windows)
+* `Anaconda <http://continuum.io/downloads>`_ or `Miniconda <http://conda.pydata.org/miniconda.html>`_ (Mac, Windows, Linux)
+* `Enthought Canopy <https://www.enthought.com/products/canopy/>`_ (Mac, Windows, Linux)
 * `PythonXY <https://code.google.com/p/pythonxy/>`_ (Windows)
 
+::
 
-Installing on Windows
----------------------
-
-You will need:
-
-  * Python 2.6, 2.7, 3.2, 3.3 or 3.4 (from Python.org)
-  * NumPy 1.6.1 or newer
-  * The "six" Python 2/3 compatibility package
-
-Download the installer from http://www.h5py.org and run it.  HDF5 is
-included.
-
-.. note::
+    conda install h5py  # Anaconda/Miniconda
+    enpkg h5py          # Canopy
 
-    If you are using Anaconda, PythonXY or another non-Python.org
-    distribution, you should instead install h5py via your distribution's
-    own pacakge manager.
+Or, use your package manager:
 
+* apt-get (Linux/Debian, including Ubuntu)
+* yum (Linux/Red Hat, including Fedora and CentOS)
+* Homebrew (OS X)
 
-Installing on Linux and Mac OS X
---------------------------------
 
-System dependencies
-~~~~~~~~~~~~~~~~~~~
+Source installation on Linux and OS X
+-------------------------------------
 
-You will need:
+You need, via apt-get, yum or Homebrew:
 
-* Python 2.6, 2.7, 3.2, 3.3, or 3.4 with development headers (``python-dev`` or similar)
+* Python 2.6, 2.7, 3.3, or 3.4 with development headers (``python-dev`` or similar)
 * HDF5 1.8.4 or newer, shared library version with development headers (``libhdf5-dev`` or similar)
+* NumPy 1.6.1 or later
 
-On Mac OS X, `homebrew <http://brew.sh>`_ is a reliable way of getting
-Python, HDF5 and other dependencies set up.  It is also safe to use h5py
-with the OS X system Python.
-
-Install with pip
-~~~~~~~~~~~~~~~~
-
-Simply run::
+::
 
     $ pip install h5py
-    
-All dependencies are installed automatically.
 
-Via setup.py
-~~~~~~~~~~~~
+or, from a tarball::
 
-You will need:
-
-* The h5py tarball from http://www.h5py.org.
-* NumPy 1.6.1 or newer
-* `Cython <http://cython.org>`_ 0.17 or newer
-
-::
-
-    $ tar xzf h5py-X.Y.Z.tar.gz
-    $ cd h5py
     $ python setup.py install
 
 
-Running the test suite
-----------------------
-
-With the tarball version of h5py::
-
-    $ python setup.py build
-    $ python setup.py test
+Source installation on Windows
+------------------------------
 
-After installing h5py::
+Installing from source on Windows is effectively impossible because of the C
+library dependencies involved.
 
-    >>> import h5py
-    >>> h5py.run_tests()
+If you don't want to use Anaconda, Canopy, or PythonXY, download
+a `third-party wheel from Chris Gohlke's excellent collection <http://www.lfd.uci.edu/~gohlke/pythonlibs/>`_.
 
 
 Custom installation
@@ -149,12 +114,3 @@ To build in MPI mode, use the ``--mpi`` option to ``setup.py configure``::
 See also :ref:`parallel`.
 
 
-Help! It didn't work!
----------------------
-
-You may wish to check the :ref:`faq` first for common installation problems.
-
-Then, feel free to ask the discussion group
-`at Google Groups <http://groups.google.com/group/h5py>`_. There's
-only one discussion group for h5py, so you're likely to get help directly
-from the maintainers.
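
Whichever installation route is taken, a quick way to confirm the build is usable is to check the version info and run the bundled test suite; a sketch::

    import h5py

    print(h5py.version.version)       # h5py version, e.g. "2.6.0"
    print(h5py.version.hdf5_version)  # version of the linked HDF5 library

    h5py.run_tests()
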
diff --git a/docs/conf.py b/docs/conf.py
index e95bf26..0b3e34a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -53,9 +53,9 @@ copyright = u'2014, Andrew Collette and contributors'
 # built documents.
 #
 # The short X.Y version.
-version = '2.5'
+version = '2.6'
 # The full version, including alpha/beta/rc tags.
-release = '2.5.0'
+release = '2.6.0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/docs/faq.rst b/docs/faq.rst
index 58b75fc..4f1dcea 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -26,6 +26,7 @@ Boolean                             NumPy 1-byte bool
 Array                               Any supported type
 Enumeration                         Any NumPy integer type                          Read/write as integers
 References                          Region and object
+Variable length array               Any supported type                              See :ref:`Special Types <vlen>`
 =========================           ============================================    ======================
 
 Unsupported types:
@@ -33,7 +34,6 @@ Unsupported types:
 =========================           ============================================
 Type                                Status                                 
 =========================           ============================================
-HDF5 VLEN (non-string)              Pull requests welcome
 HDF5 "time" type
 NumPy "U" strings                   No HDF5 equivalent
 NumPy generic "O"                   Not planned
@@ -99,7 +99,7 @@ This greatly simplifies the design of the complicated high-level interface, by
 relying on the "Pythonicity" of the C API wrapping.
 
 There's also a PyTables perspective on this question at the 
-`PyTables FAQ <http://www.pytables.org/moin/FAQ#HowdoesPyTablescomparewiththeh5pyproject.3F>`_.
+`PyTables FAQ <http://www.pytables.org/FAQ.html#how-does-pytables-compare-with-the-h5py-project>`_.
 
 
 Does h5py support Parallel HDF5?
@@ -113,8 +113,9 @@ Check out :ref:`parallel` for details.
 Variable-length (VLEN) data
 ---------------------------
 
-Variable-length byte and unicode strings are supported by h5py. However, generic
-(non-string) VLEN data cannot yet be processed. Please note that since strings
+Starting with version 2.3, all supported types can be stored in variable-length
+arrays (previously only variable-length byte and unicode strings were supported).
+See :ref:`Special Types <vlen>` for usage details.  Please note that since strings
 in HDF5 are encoded as ASCII or UTF-8, NUL bytes are not allowed in strings.
 
 
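As an illustration of the variable-length support documented above, a minimal sketch using ``special_dtype`` (file and dataset names are made up)::

    import numpy as np
    import h5py

    vlen_int = h5py.special_dtype(vlen=np.dtype('int32'))

    with h5py.File('vlen_demo.h5', 'w') as f:
        dset = f.create_dataset('ragged', (3,), dtype=vlen_int)
        dset[0] = [1, 2, 3]      # rows may have different lengths
        dset[1] = [4, 5]
        dset[2] = [6]
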
diff --git a/docs/high/attr.rst b/docs/high/attr.rst
index 98f1fcb..b67cbc2 100644
--- a/docs/high/attr.rst
+++ b/docs/high/attr.rst
@@ -1,8 +1,8 @@
 .. _attributes:
 
 
-HDF5 Attributes
-===============
+Attributes
+==========
 
 Attributes are a critical part of what makes HDF5 a "self-describing"
 format.  They are small named pieces of data attached directly to
diff --git a/docs/high/dataset.rst b/docs/high/dataset.rst
index c5cfc25..9bc4489 100644
--- a/docs/high/dataset.rst
+++ b/docs/high/dataset.rst
@@ -1,8 +1,8 @@
 .. _dataset:
 
 
-HDF5 Datasets
-=============
+Datasets
+========
 
 Datasets are very similar to NumPy arrays.  They are homogeneous collections of
 data elements, with an immutable datatype and (hyper)rectangular shape.
diff --git a/docs/high/dims.rst b/docs/high/dims.rst
index 37cbc5d..e36f92e 100644
--- a/docs/high/dims.rst
+++ b/docs/high/dims.rst
@@ -1,7 +1,7 @@
 .. _dimension_scales:
 
-HDF5 Dimension Scales
-=====================
+Dimension Scales
+================
 
 Datasets are multidimensional arrays. HDF5 provides support for labeling the
 dimensions and associating one or more "dimension scales" with each dimension. A
diff --git a/docs/high/file.rst b/docs/high/file.rst
index 81b35ba..980c5bf 100644
--- a/docs/high/file.rst
+++ b/docs/high/file.rst
@@ -1,8 +1,8 @@
 .. _file:
 
 
-HDF5 File Objects
-=================
+File Objects
+============
 
 File objects serve as your entry point into the world of HDF5.  In addition
 to the File-specific capabilities listed here, every File instance is
diff --git a/docs/high/group.rst b/docs/high/group.rst
index 08694a0..b522199 100644
--- a/docs/high/group.rst
+++ b/docs/high/group.rst
@@ -1,8 +1,8 @@
 .. _group:
 
 
-HDF5 Groups
-===========
+Groups
+======
 
 
 Groups are the container mechanism by which HDF5 files are organized.  From
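
As a quick illustration of that container behaviour (names are made up)::

    import h5py

    with h5py.File('groups_demo.h5', 'w') as f:
        grp = f.create_group('experiment/run1')   # intermediates are created
        grp.create_dataset('data', data=[1, 2, 3])
        print(f['experiment/run1/data'][...])     # POSIX-style path access
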
diff --git a/docs/index.rst b/docs/index.rst
index a7ae27e..be61a05 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -3,43 +3,32 @@ HDF5 for Python
 
 The h5py package is a Pythonic interface to the HDF5 binary data format.
 
-`HDF5 <http://hdfgroup.org>`_ is an open-source library and file format for 
-storing large amounts of numerical data, originally developed at NCSA.  It is 
-widely used in the scientific community for everything from NASA's Earth
-Observing System to the storage of data from laboratory experiments and 
-simulations.  Over the past few years, HDF5 has rapidly emerged as the de-facto
-standard  technology in Python to store large numerical datasets.
+`HDF5 <http://hdfgroup.org>`_ lets you store huge amounts of numerical
+data, and easily manipulate that data from NumPy. For example, you can slice
+into multi-terabyte datasets stored on disk, as if they were real NumPy
+arrays. Thousands of datasets can be stored in a single file, categorized and
+tagged however you want.
 
-This is the reference documentation for the h5py package.  Check out
-the :ref:`quick` if you're new to h5py and HDF5.
+Where to start
+--------------
 
-The lead author of h5py, Andrew Collette, also wrote
-`an O'Reilly book <http://shop.oreilly.com/product/0636920030249.do>`_
-which provides a comprehensive, example-based introduction to using Python
-and HDF5 together.
+* :ref:`Quick-start guide <quick>`
+* :ref:`Installation <install>`
 
-Getting h5py
-------------
 
-Downloads are at http://www.h5py.org.  It can be tricky to install all the
-C library dependencies for h5py, so check out the :ref:`install guide <install>`
-first.
-
-
-Getting help
--------------
+Other resources
+---------------
 
-Tutorial and reference documentation is available here at http://docs.h5py.org.
-We also have a mailing list `at Google Groups <http://groups.google.com/d/forum/h5py>`_.
-Anyone is welcome to post; the list is read by both users and the core developers
-of h5py.
+* `Python and HDF5 O'Reilly book <http://shop.oreilly.com/product/0636920030249.do>`_
+* `Ask questions on the mailing list at Google Groups <http://groups.google.com/d/forum/h5py>`_
+* `GitHub project <https://github.com/h5py/h5py>`_
 
 
 Introductory info
 -----------------
 
 .. toctree::
-    :maxdepth: 2
+    :maxdepth: 1
 
     quick
     build
@@ -49,7 +38,7 @@ High-level API reference
 ------------------------
 
 .. toctree::
-    :maxdepth: 2
+    :maxdepth: 1
 
     high/file
     high/group
@@ -62,7 +51,7 @@ Advanced topics
 ---------------
 
 .. toctree::
-    :maxdepth: 2
+    :maxdepth: 1
     
     config
     special
@@ -72,20 +61,11 @@ Advanced topics
     swmr
 
 
-Low-level API reference
------------------------
-
-.. toctree::
-    :maxdepth: 2
-
-    low
-
-
 Meta-info about the h5py project
 --------------------------------
 
 .. toctree::
-    :maxdepth: 2
+    :maxdepth: 1
 
     whatsnew/index
     contributing
diff --git a/docs/low.rst b/docs/low.rst
deleted file mode 100644
index 5ab6926..0000000
--- a/docs/low.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Low-Level Interface
-===================
-
-Now located at http://api.h5py.org.
\ No newline at end of file
diff --git a/docs/mpi.rst b/docs/mpi.rst
index f90c3de..4336d82 100644
--- a/docs/mpi.rst
+++ b/docs/mpi.rst
@@ -21,7 +21,7 @@ excellent, complete Python bindings for MPI.  Here's an example
 "Hello World" using ``mpi4py``::
 
     >>> from mpi4py import MPI
-    >>> print "Hello World (from process %d)" % MPI.COMM_WORLD.rank
+    >>> print "Hello World (from process %d)" % MPI.COMM_WORLD.Get_rank()
 
 To run an MPI-based parallel program, use the ``mpiexec`` program to launch
 several parallel instances of Python::
@@ -60,7 +60,8 @@ Once you've got a Parallel-enabled build of HDF5, h5py has to be compiled in
 and build h5py with the ``--mpi`` option::
 
     $ export CC=mpicc
-    $ python setup.py build --mpi [--hdf5=/path/to/parallel/hdf5]
+    $ python setup.py configure --mpi [--hdf5=/path/to/parallel/hdf5]
+    $ python setup.py build
 
 
 Using Parallel HDF5 from h5py
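
For context, once a parallel build is in place, the canonical usage pattern looks roughly like this (a sketch; launch it with ``mpiexec``)::

    from mpi4py import MPI
    import h5py

    comm = MPI.COMM_WORLD
    f = h5py.File('parallel_demo.h5', 'w', driver='mpio', comm=comm)

    dset = f.create_dataset('ranks', (comm.Get_size(),), dtype='i')
    dset[comm.Get_rank()] = comm.Get_rank()   # each process writes one slot

    f.close()
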
diff --git a/docs/quick.rst b/docs/quick.rst
index f70a682..ebd4a48 100644
--- a/docs/quick.rst
+++ b/docs/quick.rst
@@ -3,7 +3,20 @@
 Quick Start Guide
 =================
 
-If you're having trouble installing h5py, refer to :ref:`install`.
+Install
+-------
+
+With `Anaconda <http://continuum.io/downloads>`_ or 
+`Miniconda <http://conda.pydata.org/miniconda.html>`_::
+
+    conda install h5py
+    
+With `Enthought Canopy <https://www.enthought.com/products/canopy/>`_, use
+the GUI package manager or::
+
+    enpkg h5py
+
+With pip or setup.py, see :ref:`install`.
 
 Core concepts
 -------------
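
The core concepts that follow reduce to a few lines of code; a representative sketch (the filename is made up)::

    import numpy as np
    import h5py

    with h5py.File('quickstart.h5', 'w') as f:
        dset = f.create_dataset('mydataset', (100,), dtype='i')
        dset[...] = np.arange(100)
        print(dset.shape, dset.dtype)   # (100,), int32 on most platforms
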
diff --git a/docs_api/automod.py b/docs_api/automod.py
index 0fb4839..3d2ec2c 100644
--- a/docs_api/automod.py
+++ b/docs_api/automod.py
@@ -48,9 +48,15 @@ class_exprs = { "ObjectID": "h5py.h5.ObjectID",
                 "[Gg]roup creation property list": "h5py.h5p.PropGCID"}
 
 
-class_exprs = dict( 
-    (re.compile(class_base % x.replace(" ",r"\s"), re.VERBOSE), y) \
-    for x, y in class_exprs.iteritems() )
+try:
+    class_exprs = dict( 
+        (re.compile(class_base % x.replace(" ",r"\s"), re.VERBOSE), y) \
+        for x, y in class_exprs.iteritems() )
+except AttributeError:
+    class_exprs = dict( 
+        (re.compile(class_base % x.replace(" ",r"\s"), re.VERBOSE), y) \
+        for x, y in class_exprs.items() )
+
 
 def replace_class(istr):
 
diff --git a/examples/collective_io.py b/examples/collective_io.py
new file mode 100644
index 0000000..45932c9
--- /dev/null
+++ b/examples/collective_io.py
@@ -0,0 +1,59 @@
+# This file tests collective I/O in h5py.
+
+"""
+Author:  Jialin Liu, jalnliu at lbl.gov
+Date:    Nov 17, 2015
+Prerequisites: Python 2.5.0, mpi4py and numpy
+Source code: this 'collective io' branch has already been submitted to h5py master; it can also be downloaded from https://github.com/valiantljk/h5py.git
+Note: h5py must be built with parallel HDF5
+"""
+
+from mpi4py import MPI
+import numpy as np
+import h5py
+import time
+import sys
+
+#"run as "mpirun -np 64 python-mpi collective_io.py 1 file.h5" 
+#(1 is for collective write, ohter number for non-collective write)"
+
+colw = 1  # default is collective write
+filename = "parallel_test.hdf5"
+if len(sys.argv) > 2:
+    colw = int(sys.argv[1])
+    filename = str(sys.argv[2])
+comm = MPI.COMM_WORLD
+nproc = comm.Get_size()
+f = h5py.File(filename, 'w', driver='mpio', comm=MPI.COMM_WORLD)
+rank = comm.Get_rank()
+length_x = 6400*1024
+length_y = 1024
+dset = f.create_dataset('test', (length_x, length_y), dtype='f8')
+# Data types should be consistent between numpy and h5py, e.g. 64 bits;
+# otherwise the hdf5 layer will fall back to independent io.
+f.atomic = False
+length_rank = length_x / nproc
+length_last_rank = length_x - length_rank*(nproc-1)
+comm.Barrier()
+timestart = MPI.Wtime()
+start = rank*length_rank
+end = start + length_rank
+if rank == nproc-1:  # last rank
+    end = start + length_last_rank
+temp = np.random.random((end-start, length_y))
+if colw == 1:
+    with dset.collective:
+        dset[start:end, :] = temp
+else:
+    dset[start:end, :] = temp
+comm.Barrier()
+timeend = MPI.Wtime()
+if rank == 0:
+    if colw == 1:
+        print "collective write time %f" % (timeend-timestart)
+    else:
+        print "independent write time %f" % (timeend-timestart)
+    print "data size x: %d y: %d" % (length_x, length_y)
+    print "file size ~%d GB" % (length_x*length_y/1024.0/1024.0/1024.0*8.0)
+    print "number of processes %d" % nproc
+f.close()
diff --git a/h5py/__init__.py b/h5py/__init__.py
index d7c8250..62987eb 100644
--- a/h5py/__init__.py
+++ b/h5py/__init__.py
@@ -7,8 +7,19 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    This is the h5py package, a Python interface to the HDF5
+    scientific data format.
+"""
+
 from __future__ import absolute_import
 
+
+# --- Library setup -----------------------------------------------------------
+
+# When importing from the root of the unpacked tarball or git checkout,
+# Python sees the "h5py" source directory and tries to load it, which fails.
+# We tried working around this by using "package_dir" but that breaks Cython.
 try:
     from . import _errors
 except ImportError:
@@ -20,44 +31,38 @@ except ImportError:
     
 _errors.silence_errors()
 
-from . import _conv
-_conv.register_converters()
+from ._conv import register_converters as _register_converters
+_register_converters()
 
-from . import h5a, h5d, h5ds, h5f, h5fd, h5g, h5r, h5s, h5t, h5p, h5z
+from .h5z import _register_lzf
+_register_lzf()
 
-h5s.NULL = h5s._NULL  # NULL is a reserved name at the Cython layer
-h5z._register_lzf()
 
-from .highlevel import *
+# --- Public API --------------------------------------------------------------
+
+from . import h5a, h5d, h5ds, h5f, h5fd, h5g, h5r, h5s, h5t, h5p, h5z
+
+from ._hl import filters
+from ._hl.base import is_hdf5, HLObject
+from ._hl.files import File
+from ._hl.group import Group, SoftLink, ExternalLink, HardLink
+from ._hl.dataset import Dataset
+from ._hl.datatype import Datatype
+from ._hl.attrs import AttributeManager
 
 from .h5 import get_config
 from .h5r import Reference, RegionReference
 from .h5t import special_dtype, check_dtype
 
-# Deprecated functions
-from .h5t import py_new_vlen as new_vlen
-from .h5t import py_get_vlen as get_vlen
-from .h5t import py_new_enum as new_enum
-from .h5t import py_get_enum as get_enum
-
 from . import version
+from .version import version as __version__
 
 from .tests import run_tests
 
-__version__ = version.version
-
-__doc__ = \
-"""
-    This is the h5py package, a Python interface to the HDF5
-    scientific data format.
-
-    Version %s
-
-    HDF5 %s
-""" % (version.version, version.hdf5_version)
-
-
 def enable_ipython_completer():
+    """ Call this from an interactive IPython session to enable tab-completion
+    of group and attribute names.
+    """
     import sys
     if 'IPython' in sys.modules:
         ip_running = False
@@ -74,5 +79,16 @@ def enable_ipython_completer():
             from . import ipy_completer
             return ipy_completer.load_ipython_extension()
 
-    raise RuntimeError('completer must be enabled in active ipython session')
+    raise RuntimeError('Completer must be enabled in active ipython session')
+
+
+# --- Legacy API --------------------------------------------------------------
+
+from .h5t import py_new_vlen as new_vlen
+from .h5t import py_get_vlen as get_vlen
+from .h5t import py_new_enum as new_enum
+from .h5t import py_get_enum as get_enum
+
+
+
 
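The completer hook documented above is meant to be called from a live session; a sketch::

    >>> import h5py
    >>> h5py.enable_ipython_completer()   # RuntimeError outside IPython
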
diff --git a/h5py/_hl/__init__.py b/h5py/_hl/__init__.py
index 2a8ae91..d131a68 100644
--- a/h5py/_hl/__init__.py
+++ b/h5py/_hl/__init__.py
@@ -7,5 +7,12 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    This subpackage implements the high-level interface for h5py.
+    
+    Don't manually import things from here; the public API lives directly
+    in the top-level package namespace.
+"""
+
 from __future__ import absolute_import
 
diff --git a/h5py/_hl/attrs.py b/h5py/_hl/attrs.py
index a1c0e41..f68e4de 100644
--- a/h5py/_hl/attrs.py
+++ b/h5py/_hl/attrs.py
@@ -7,10 +7,16 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    Implements high-level operations for attributes.
+    
+    Provides the AttributeManager class, available on high-level objects
+    as <obj>.attrs.
+"""
+
 from __future__ import absolute_import
 
 import numpy
-import collections
 
 from .. import h5s, h5t, h5a
 from . import base
@@ -19,7 +25,7 @@ from .dataset import readtime_dtype
 from .datatype import Datatype
 
 
-class AttributeManager(base.MutableMappingWithLock, base.CommonStateObject):
+class AttributeManager(base.MutableMappingHDF5, base.CommonStateObject):
 
     """
         Allows dictionary-style access to an HDF5 object's attributes.
@@ -181,7 +187,7 @@ class AttributeManager(base.MutableMappingWithLock, base.CommonStateObject):
                 try:
                     attr.write(data, mtype=htype2)
                 except:
-                    attr._close()
+                    attr.close()
                     h5a.delete(self._id, self._e(tempname))
                     raise
                 else:
@@ -191,7 +197,7 @@ class AttributeManager(base.MutableMappingWithLock, base.CommonStateObject):
                             h5a.delete(self._id, self._e(name))
                         h5a.rename(self._id, self._e(tempname), self._e(name))
                     except:
-                        attr._close()
+                        attr.close()
                         h5a.delete(self._id, self._e(tempname))
                         raise
                         
@@ -230,10 +236,12 @@ class AttributeManager(base.MutableMappingWithLock, base.CommonStateObject):
     def __iter__(self):
         """ Iterate over the names of attributes. """
         with phil:
+        
             attrlist = []
-
             def iter_cb(name, *args):
+                """ Callback to gather attribute names """
                 attrlist.append(self._d(name))
+
             h5a.iterate(self._id, iter_cb)
 
         for name in attrlist:
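
For reference, the dictionary-style interface provided by AttributeManager is used like this (names are made up)::

    import h5py

    with h5py.File('attrs_demo.h5', 'w') as f:
        dset = f.create_dataset('data', data=[1, 2, 3])
        dset.attrs['temperature'] = 25.5   # create or overwrite
        dset.attrs['units'] = 'celsius'
        for name in dset.attrs:            # the __iter__ shown above
            print(name, dset.attrs[name])
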
diff --git a/h5py/_hl/base.py b/h5py/_hl/base.py
index 71e4e56..7a606ba 100644
--- a/h5py/_hl/base.py
+++ b/h5py/_hl/base.py
@@ -7,17 +7,18 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    Implements operations common to all high-level objects (File, etc.).
+"""
+
 from __future__ import absolute_import
 
 import posixpath
-import warnings
 import os
 import sys
-from collections import (
-    Mapping, MutableMapping, MappingView, KeysView, ValuesView, ItemsView
-)
-
 import six
+from collections import (Mapping, MutableMapping, KeysView, 
+                         ValuesView, ItemsView)
 
 from .. import h5d, h5i, h5r, h5p, h5f, h5t
 
@@ -112,6 +113,7 @@ class CommonStateObject(object):
         If name is None, returns either None or (None, None) appropriately.
         """
         def get_lcpl(coding):
+            """ Create an appropriate link creation property list """
             lcpl = self._lcpl.copy()
             lcpl.set_char_encoding(coding)
             return lcpl
@@ -179,7 +181,7 @@ class _RegionProxy(object):
             raise TypeError("Region references can only be made to datasets")
         from . import selections
         selection = selections.select(self.id.shape, args, dsid=self.id)
-        return h5r.create(self.id, b'.', h5r.DATASET_REGION, selection._id)
+        return h5r.create(self.id, b'.', h5r.DATASET_REGION, selection.id)
 
     def shape(self, ref):
         """ Get the shape of the target dataspace referred to by *ref*. """
@@ -284,24 +286,26 @@ class HLObject(CommonStateObject):
             return bool(self.id)
     __nonzero__ = __bool__
 
-class MappingViewWithLock(MappingView):
-
-    def __len__(self):
-        with phil:
-            return super(MappingViewWithLock, self).__len__()
 
+# --- Dictionary-style interface ----------------------------------------------
 
-class KeysViewWithLock(MappingViewWithLock, KeysView):
-    def __contains__(self, item):
-        with phil:
-            return super(KeysViewWithLock, self).__contains__(item)
+# To implement the dictionary-style interface from groups and attributes,
+# we inherit from the appropriate abstract base classes in collections.
+#
+# All locking is taken care of by the subclasses.
+# We have to override ValuesView and ItemsView here because Group and
+# AttributeManager can only test for key names.
 
-    def __iter__(self):
-        with phil:
-            return super(KeysViewWithLock, self).__iter__()
 
+class ValuesViewHDF5(ValuesView):
 
-class ValuesViewWithLock(MappingViewWithLock, ValuesView):
+    """
+        Wraps e.g. a Group or AttributeManager to provide a value view.
+        
+        Note that __contains__ will have poor performance as it has
+        to scan all the links or attributes.
+    """
+    
     def __contains__(self, value):
         with phil:
             for key in self._mapping:
@@ -315,7 +319,12 @@ class ValuesViewWithLock(MappingViewWithLock, ValuesView):
                 yield self._mapping.get(key)
 
 
-class ItemsViewWithLock(MappingViewWithLock, ItemsView):
+class ItemsViewHDF5(ItemsView):
+
+    """
+        Wraps e.g. a Group or AttributeManager to provide an items view.
+    """
+        
     def __contains__(self, item):
         with phil:
             key, val = item
@@ -329,30 +338,28 @@ class ItemsViewWithLock(MappingViewWithLock, ItemsView):
                 yield (key, self._mapping.get(key))
 
 
-class MappingWithLock(Mapping):
+class MappingHDF5(Mapping):
+
     """
-    Subclass of collections.Mapping with locks.
+        Wraps a Group, AttributeManager or DimensionManager object to provide
+        an immutable mapping interface.
+        
+        We don't inherit directly from MutableMapping because certain
+        subclasses, for example DimensionManager, are read-only.
     """
-    def get(self, name, default=None):
-        """ Retrieve the member, or return default if it doesn't exist """
-        with phil:
-            try:
-                return self[name]
-            except KeyError:
-                return default
-
+    
     if six.PY3:
         def keys(self):
             """ Get a view object on member names """
-            return KeysViewWithLock(self)
+            return KeysView(self)
 
         def values(self):
             """ Get a view object on member objects """
-            return ValuesViewWithLock(self)
+            return ValuesViewHDF5(self)
 
         def items(self):
             """ Get a view object on member items """
-            return ItemsViewWithLock(self)
+            return ItemsViewHDF5(self)
 
     else:
         def keys(self):
@@ -379,6 +386,15 @@ class MappingWithLock(Mapping):
             """ Get an iterator over (name, object) pairs """
             for x in self:
                 yield (x, self.get(x))
+                
+
+class MutableMappingHDF5(MappingHDF5, MutableMapping):
+
+    """
+        Wraps a Group or AttributeManager object to provide a mutable
+        mapping interface, in contrast to the read-only mapping of
+        MappingHDF5.
+    """
 
-class MutableMappingWithLock(MappingWithLock,MutableMapping):
     pass
+    
\ No newline at end of file
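
These abstract base classes are what give Group and AttributeManager their dictionary-like behaviour; a sketch of the resulting interface::

    import h5py

    with h5py.File('mapping_demo.h5', 'w') as f:
        f.create_dataset('a', data=1)
        f.create_group('b')

        print(list(f.keys()))        # ['a', 'b']
        print('a' in f)              # key lookup only, no object open
        for name, obj in f.items():  # objects are opened lazily per key
            print(name, obj)
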
diff --git a/h5py/_hl/dataset.py b/h5py/_hl/dataset.py
index 89b02f0..ab257ba 100644
--- a/h5py/_hl/dataset.py
+++ b/h5py/_hl/dataset.py
@@ -7,17 +7,21 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    Implements support for high-level dataset access.
+"""
+
 from __future__ import absolute_import
 
 import posixpath as pp
 import sys
 
 import six
-from six.moves import xrange
+from six.moves import xrange    # pylint: disable=redefined-builtin
 
 import numpy
 
-from .. import h5s, h5t, h5r, h5d
+from .. import h5, h5s, h5t, h5r, h5d, h5p, h5fd
 from .base import HLObject, phil, with_phil
 from . import filters
 from . import selections as sel
@@ -25,6 +29,7 @@ from . import selections2 as sel2
 from .datatype import Datatype
 
 _LEGACY_GZIP_COMPRESSION_VALS = frozenset(range(10))
+MPI = h5.get_config().mpi
 
 def readtime_dtype(basetype, names):
     """ Make a NumPy dtype appropriate for reading """
@@ -41,6 +46,7 @@ def readtime_dtype(basetype, names):
 
     return numpy.dtype([(name, basetype.fields[name][0]) for name in names])
 
+
 def make_new_dset(parent, shape=None, dtype=None, data=None,
                  chunks=None, compression=None, shuffle=None,
                     fletcher32=None, maxshape=None, compression_opts=None,
@@ -130,16 +136,39 @@ def make_new_dset(parent, shape=None, dtype=None, data=None,
 
 class AstypeContext(object):
 
+    """
+        Context manager which allows changing the type read from a dataset.
+    """
+    
     def __init__(self, dset, dtype):
         self._dset = dset
         self._dtype = numpy.dtype(dtype)
 
     def __enter__(self):
+        # pylint: disable=protected-access
         self._dset._local.astype = self._dtype
 
     def __exit__(self, *args):
+        # pylint: disable=protected-access
         self._dset._local.astype = None
 
+if MPI:
+    class CollectiveContext(object):
+
+        """ Manages collective I/O in MPI mode """
+
+        # We don't bother with _local as threads are forbidden in MPI mode
+
+        def __init__(self, dset):
+            self._dset = dset
+
+        def __enter__(self):
+            # pylint: disable=protected-access
+            self._dset._dxpl.set_dxpl_mpio(h5fd.MPIO_COLLECTIVE)
+
+        def __exit__(self, *args):
+            # pylint: disable=protected-access
+            self._dset._dxpl.set_dxpl_mpio(h5fd.MPIO_INDEPENDENT)
 
 class Dataset(HLObject):
 
@@ -156,20 +185,36 @@ class Dataset(HLObject):
         """
         return AstypeContext(self, dtype)
 
+    if MPI:
+        @property
+        @with_phil
+        def collective(self):
+            """ Context manager for MPI collective reads & writes """
+            return CollectiveContext(self)
+
+
     @property
     @with_phil
     def dims(self):
-        from . dims import DimensionManager
+        """ Access dimension scales attached to this dataset. """
+        from .dims import DimensionManager
         return DimensionManager(self)
 
     @property
     @with_phil
+    def ndim(self):
+        """Numpy-style attribute giving the number of dimensions"""
+        return self.id.rank
+
+    @property
+    @with_phil
     def shape(self):
         """Numpy-style shape tuple giving dataset dimensions"""
         return self.id.shape
     @shape.setter
     @with_phil
     def shape(self, shape):
+        # pylint: disable=missing-docstring
         self.resize(shape)
 
     @property
@@ -254,7 +299,7 @@ class Dataset(HLObject):
     def fillvalue(self):
         """Fill value for this dataset (0 by default)"""
         arr = numpy.ndarray((1,), dtype=self.dtype)
-        dcpl = self._dcpl.get_fill_value(arr)
+        self._dcpl.get_fill_value(arr)
         return arr[0]
 
     @with_phil
@@ -268,6 +313,7 @@ class Dataset(HLObject):
         HLObject.__init__(self, bind)
 
         self._dcpl = self.id.get_create_plist()
+        self._dxpl = h5p.create(h5p.DATASET_XFER)
         self._filters = filters.get_filters(self._dcpl)
         self._local = local()
         self._local.astype = None
@@ -360,21 +406,6 @@ class Dataset(HLObject):
         if not six.PY3:
             names = tuple(x.encode('utf-8') if isinstance(x, six.text_type) else x for x in names)
 
-        def readtime_dtype(basetype, names):
-            """ Make a NumPy dtype appropriate for reading """
-
-            if len(names) == 0:  # Not compound, or we want all fields
-                return basetype
-
-            if basetype.names is None:  # Names provided, but not compound
-                raise ValueError("Field names only allowed for compound types")
-
-            for name in names:  # Check all names are legal
-                if not name in basetype.names:
-                    raise ValueError("Field %s does not appear in this type." % name)
-
-            return numpy.dtype([(name, basetype.fields[name][0]) for name in names])
-
         new_dtype = getattr(self._local, 'astype', None)
         if new_dtype is not None:
             new_dtype = readtime_dtype(new_dtype, names)
@@ -447,8 +478,8 @@ class Dataset(HLObject):
 
         # Perfom the actual read
         mspace = h5s.create_simple(mshape)
-        fspace = selection._id
-        self.id.read(mspace, fspace, arr, mtype)
+        fspace = selection.id
+        self.id.read(mspace, fspace, arr, mtype, dxpl=self._dxpl)
 
         # Patch up the output for NumPy
         if len(names) == 1:
@@ -511,9 +542,10 @@ class Dataset(HLObject):
                 dtype = self.dtype
                 cast_compound = False
 
-            val = numpy.asarray(val, dtype=dtype, order='C')
+            val = numpy.asarray(val, dtype=dtype.base, order='C')
             if cast_compound:
-                val = val.astype(numpy.dtype([(names[0], dtype)]))
+                val = val.view(numpy.dtype([(names[0], dtype)]))
+                val = val.reshape(val.shape[:len(val.shape) - len(dtype.shape)])
         else:
             val = numpy.asarray(val, order='C')
 
@@ -541,7 +573,7 @@ class Dataset(HLObject):
         
             # Write non-compound source into a single dataset field
             if len(names) == 1 and val.dtype.fields is None:
-                subtype = h5y.py_create(val.dtype)
+                subtype = h5t.py_create(val.dtype)
                 mtype = h5t.create(h5t.COMPOUND, subtype.get_size())
                 mtype.insert(self._e(names[0]), 0, subtype)
 
@@ -566,7 +598,7 @@ class Dataset(HLObject):
             return
 
         # Broadcast scalars if necessary.
-        if (mshape == () and selection.mshape != ()):
+        if mshape == () and selection.mshape != ():
             if self.dtype.subdtype is not None:
                 raise TypeError("Scalar broadcasting is not supported for array dtypes")
             val2 = numpy.empty(selection.mshape[-1], dtype=val.dtype)
@@ -577,13 +609,13 @@ class Dataset(HLObject):
         # Perform the write, with broadcasting
         # Be careful to pad memory shape with ones to avoid HDF5 chunking
         # glitch, which kicks in for mismatched memory/file selections
-        if(len(mshape) < len(self.shape)):
+        if len(mshape) < len(self.shape):
             mshape_pad = (1,)*(len(self.shape)-len(mshape)) + mshape
         else:
             mshape_pad = mshape
         mspace = h5s.create_simple(mshape_pad, (h5s.UNLIMITED,)*len(mshape_pad))
         for fspace in selection.broadcast(mshape):
-            self.id.write(mspace, fspace, val, mtype)
+            self.id.write(mspace, fspace, val, mtype, dxpl=self._dxpl)
 
     def read_direct(self, dest, source_sel=None, dest_sel=None):
         """ Read data directly from HDF5 into an existing NumPy array.
@@ -598,7 +630,7 @@ class Dataset(HLObject):
                 source_sel = sel.SimpleSelection(self.shape)
             else:
                 source_sel = sel.select(self.shape, source_sel, self.id)  # for numpy.s_
-            fspace = source_sel._id
+            fspace = source_sel.id
 
             if dest_sel is None:
                 dest_sel = sel.SimpleSelection(dest.shape)
@@ -606,7 +638,7 @@ class Dataset(HLObject):
                 dest_sel = sel.select(dest.shape, dest_sel, self.id)
 
             for mspace in dest_sel.broadcast(source_sel.mshape):
-                self.id.read(mspace, fspace, dest)
+                self.id.read(mspace, fspace, dest, dxpl=self._dxpl)
 
     def write_direct(self, source, source_sel=None, dest_sel=None):
         """ Write data directly to HDF5 from a NumPy array.
@@ -621,7 +653,7 @@ class Dataset(HLObject):
                 source_sel = sel.SimpleSelection(source.shape)
             else:
                 source_sel = sel.select(source.shape, source_sel, self.id)  # for numpy.s_
-            mspace = source_sel._id
+            mspace = source_sel.id
 
             if dest_sel is None:
                 dest_sel = sel.SimpleSelection(self.shape)
@@ -629,7 +661,7 @@ class Dataset(HLObject):
                 dest_sel = sel.select(self.shape, dest_sel, self.id)
 
             for fspace in dest_sel.broadcast(source_sel.mshape):
-                self.id.write(mspace, fspace, source)
+                self.id.write(mspace, fspace, source, dxpl=self._dxpl)
 
     @with_phil
     def __array__(self, dtype=None):
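
Two features touched above, the read-as-type context manager and the new MPI collective context manager, look like this in use (a sketch; the collective block needs an MPI-enabled build)::

    import h5py

    with h5py.File('dataset_demo.h5', 'w') as f:
        dset = f.create_dataset('data', (100,), dtype='f8')

        with dset.astype('f4'):      # read back as float32
            out = dset[...]
        print(out.dtype)             # float32

        # Only available when h5py is built in MPI mode:
        # with dset.collective:
        #     dset[start:end] = values
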
diff --git a/h5py/_hl/datatype.py b/h5py/_hl/datatype.py
index aab79ae..8ecbb0f 100644
--- a/h5py/_hl/datatype.py
+++ b/h5py/_hl/datatype.py
@@ -7,12 +7,16 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    Implements high-level access to committed datatypes in the file.
+"""
+
 from __future__ import absolute_import
 
 import posixpath as pp
 
 from ..h5t import TypeID
-from .base import HLObject, phil, with_phil
+from .base import HLObject, with_phil
 
 class Datatype(HLObject):
 
diff --git a/h5py/_hl/dims.py b/h5py/_hl/dims.py
index d11abc1..b9ee6ed 100644
--- a/h5py/_hl/dims.py
+++ b/h5py/_hl/dims.py
@@ -7,21 +7,28 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
-from __future__ import absolute_import
+"""
+    Implements support for HDF5 dimension scales.
+"""
 
-import numpy
+from __future__ import absolute_import
 
 from .. import h5ds
 from . import base
 from .base import phil, with_phil
-from .dataset import Dataset, readtime_dtype
+from .dataset import Dataset
 
 
 class DimensionProxy(base.CommonStateObject):
 
+    """
+        Represents an HDF5 "dimension".
+    """
+    
     @property
     @with_phil
     def label(self):
+        """ Get or set the dimension scale label """
         #return h5ds.get_label(self._id, self._dimension)
         # Produces a segfault for a non-existent label (Fixed in hdf5-1.8.8).
         # Here is a workaround:
@@ -33,11 +40,12 @@ class DimensionProxy(base.CommonStateObject):
     @label.setter
     @with_phil
     def label(self, val):
+        # pylint: disable=missing-docstring
         h5ds.set_label(self._id, self._dimension, self._e(val))
 
     @with_phil
-    def __init__(self, id, dimension):
-        self._id = id
+    def __init__(self, id_, dimension):
+        self._id = id_
         self._dimension = dimension
 
     @with_phil
@@ -59,52 +67,65 @@ class DimensionProxy(base.CommonStateObject):
 
     @with_phil
     def __getitem__(self, item):
+    
         if isinstance(item, int):
             scales = []
-            def f(dsid):
-                scales.append(Dataset(dsid))
-            h5ds.iterate(self._id, self._dimension, f, 0)
-            return scales[item]
+            h5ds.iterate(self._id, self._dimension, scales.append, 0)
+            return Dataset(scales[item])
+            
         else:
             def f(dsid):
+                """ Iterate over scales to find a matching name """
                 if h5ds.get_scale_name(dsid) == self._e(item):
-                    return Dataset(dsid)
+                    return dsid
+                    
             res = h5ds.iterate(self._id, self._dimension, f, 0)
             if res is None:
-                raise KeyError('%s not found' % item)
-            return res
+                raise KeyError(item)
+            return Dataset(res)
 
     def attach_scale(self, dset):
+        """ Attach a scale to this dimension.
+        
+        Provide the Dataset of the scale you would like to attach.
+        """
         with phil:
             h5ds.attach_scale(self._id, dset.id, self._dimension)
 
     def detach_scale(self, dset):
+        """ Remove a scale from this dimension.
+        
+        Provide the Dataset of the scale you would like to remove.
+        """
         with phil:
             h5ds.detach_scale(self._id, dset.id, self._dimension)
 
     def items(self):
+        """ Get a list of (name, Dataset) pairs with all scales on this
+        dimension.
+        """
         with phil:
             scales = []
-            def f(dsid):
-                scales.append(dsid)
-            
+
             # H5DSiterate raises an error if there are no dimension scales,
             # rather than iterating 0 times.  See #483.
             if len(self) > 0:
-                h5ds.iterate(self._id, self._dimension, f, 0)
+                h5ds.iterate(self._id, self._dimension, scales.append, 0)
                 
             return [
-                (self._d(h5ds.get_scale_name(id)), Dataset(id))
-                for id in scales
+                (self._d(h5ds.get_scale_name(x)), Dataset(x))
+                for x in scales
                 ]
 
     def keys(self):
+        """ Get a list of names for the scales on this dimension. """
         with phil:
-            return [key for (key, val) in self.items()]
+            return [key for (key, _) in self.items()]
 
     def values(self):
+        """ Get a list of Dataset for scales on this dimension. """
         with phil:
-            return [val for (key, val) in self.items()]
+            return [val for (_, val) in self.items()]
 
     @with_phil
     def __repr__(self):
@@ -114,9 +135,13 @@ class DimensionProxy(base.CommonStateObject):
                % (self.label, self._dimension, id(self._id)))
 
 
-class DimensionManager(base.MappingWithLock, base.CommonStateObject):
+class DimensionManager(base.MappingHDF5, base.CommonStateObject):
 
     """
+        Represents a collection of dimensions associated with a dataset.
+        
+        Like AttributeManager, an instance of this class is returned when
+        accessing the ".dims" property on a Dataset.
     """
 
     @with_phil
@@ -151,5 +176,9 @@ class DimensionManager(base.MappingWithLock, base.CommonStateObject):
         return "<Dimensions of HDF5 object at %s>" % id(self._id)
 
     def create_scale(self, dset, name=''):
+        """ Create a new dimension, from an initial scale.
+        
+        Provide the dataset and a name for the scale.
+        """
         with phil:
             h5ds.set_scale(dset.id, self._e(name))
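
A brief sketch of the dimension-scale workflow these methods support (names are illustrative)::

    import h5py

    with h5py.File('dims_demo.h5', 'w') as f:
        dset = f.create_dataset('data', (10, 3), dtype='f8')
        xvals = f.create_dataset('x', data=list(range(10)))

        dset.dims.create_scale(xvals, 'x-axis')
        dset.dims[0].attach_scale(xvals)
        dset.dims[0].label = 'x'
        print(dset.dims[0].keys())   # ['x-axis']
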
diff --git a/h5py/_hl/files.py b/h5py/_hl/files.py
index 515d0f8..8dd2c7e 100644
--- a/h5py/_hl/files.py
+++ b/h5py/_hl/files.py
@@ -7,17 +7,20 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    Implements high-level support for HDF5 file objects.
+"""
+
 from __future__ import absolute_import
 
-import weakref
 import sys
 import os
 
 import six
 
-from .base import HLObject, phil, with_phil
+from .base import phil, with_phil
 from .group import Group
-from .. import h5, h5f, h5p, h5i, h5fd, h5t, _objects
+from .. import h5, h5f, h5p, h5i, h5fd, _objects
 from .. import version
 
 mpi = h5.get_config().mpi
@@ -49,15 +52,15 @@ def make_fapl(driver, libver, **kwds):
     if driver is None or (driver == 'windows' and sys.platform == 'win32'):
         return plist
 
-    if(driver == 'sec2'):
+    if driver == 'sec2':
         plist.set_fapl_sec2(**kwds)
-    elif(driver == 'stdio'):
+    elif driver == 'stdio':
         plist.set_fapl_stdio(**kwds)
-    elif(driver == 'core'):
+    elif driver == 'core':
         plist.set_fapl_core(**kwds)
-    elif(driver == 'family'):
+    elif driver == 'family':
         plist.set_fapl_family(memb_fapl=plist.copy(), **kwds)
-    elif(driver == 'mpio'):
+    elif driver == 'mpio':
         kwds.setdefault('info', mpi4py.MPI.Info())
         plist.set_fapl_mpio(**kwds)
     else:
@@ -202,23 +205,26 @@ class File(Group):
         @atomic.setter
         @with_phil
         def atomic(self, value):
+            # pylint: disable=missing-docstring
             self.id.set_mpi_atomicity(value)
             
     if swmr_support:
         @property
         def swmr_mode(self):
+            """ Controls single-writer multiple-reader mode """
             return self._swmr_mode
             
         @swmr_mode.setter
         @with_phil
         def swmr_mode(self, value):
+            # pylint: disable=missing-docstring
             if value:
                 self.id.start_swmr_write()
                 self._swmr_mode = True
             else:
-                raise ValueError("It is not possible to forcibly swith SWMR mode off.")
+                raise ValueError("It is not possible to forcibly switch SWMR mode off.")
 
-    def __init__(self, name, mode=None, driver=None, 
+    def __init__(self, name, mode=None, driver=None,
                  libver=None, userblock_size=None, swmr=False, **kwds):
         """Create a new file object.
 
@@ -227,6 +233,12 @@ class File(Group):
         name
             Name of the file on disk.  Note: for files created with the 'core'
             driver, HDF5 still requires this be non-empty.
+        mode
+            r        Readonly, file must exist
+            r+       Read/write, file must exist
+            w        Create file, truncate if exists
+            w- or x  Create file, fail if exists
+            a        Read/write if exists, create otherwise (default)
         driver
             Name of the driver to use.  Legal values are None (default,
             recommended), 'core', 'sec2', 'stdio', 'mpio'.
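
The mode table added above translates directly into code; a sketch (filenames are made up)::

    import h5py

    f = h5py.File('modes_demo.h5', 'w')    # create, truncate if present
    f.close()

    f = h5py.File('modes_demo.h5', 'r+')   # read/write, must exist
    f.close()

    try:
        h5py.File('modes_demo.h5', 'w-')   # refuses to clobber
    except IOError:
        print('file already exists')
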
diff --git a/h5py/_hl/filters.py b/h5py/_hl/filters.py
index 1ef8370..a21f013 100644
--- a/h5py/_hl/filters.py
+++ b/h5py/_hl/filters.py
@@ -40,14 +40,9 @@
 
 from __future__ import absolute_import, division
 
-import six
-
 import numpy as np
+from .. import h5z, h5p, h5d
 
-from .. import h5s, h5z, h5p, h5d
-
-if six.PY3:
-    long = int
 
 _COMP_FILTERS = {'gzip': h5z.FILTER_DEFLATE,
                 'szip': h5z.FILTER_SZIP,
@@ -60,17 +55,18 @@ DEFAULT_GZIP = 4
 DEFAULT_SZIP = ('nn', 8)
 
 def _gen_filter_tuples():
-    decode = []
-    encode = []
-    for name, code in six.iteritems(_COMP_FILTERS):
+    """ Bootstrap function to figure out what filters are available. """
+    dec = []
+    enc = []
+    for name, code in _COMP_FILTERS.items():
         if h5z.filter_avail(code):
             info = h5z.get_filter_info(code)
             if info & h5z.FILTER_CONFIG_ENCODE_ENABLED:
-                encode.append(name)
+                enc.append(name)
             if info & h5z.FILTER_CONFIG_DECODE_ENABLED:
-                decode.append(name)
+                dec.append(name)
 
-    return tuple(decode), tuple(encode)
+    return tuple(dec), tuple(enc)
 
 decode, encode = _gen_filter_tuples()
 
@@ -218,7 +214,6 @@ def get_filters(plist):
     filters = {h5z.FILTER_DEFLATE: 'gzip', h5z.FILTER_SZIP: 'szip',
                h5z.FILTER_SHUFFLE: 'shuffle', h5z.FILTER_FLETCHER32: 'fletcher32',
                h5z.FILTER_LZF: 'lzf', h5z.FILTER_SCALEOFFSET: 'scaleoffset'}
-    szopts = {h5z.SZIP_EC_OPTION_MASK: 'ec', h5z.SZIP_NN_OPTION_MASK: 'nn'}
 
     pipeline = {}
 
@@ -226,7 +221,7 @@ def get_filters(plist):
 
     for i in range(nfilters):
 
-        code, flags, vals, desc = plist.get_filter(i)
+        code, _, vals, _ = plist.get_filter(i)
 
         if code == h5z.FILTER_DEFLATE:
             vals = vals[0] # gzip level
@@ -262,7 +257,8 @@ def guess_chunk(shape, maxshape, typesize):
 
     Undocumented and subject to change without warning.
     """
-
+    # pylint: disable=unused-argument
+    
     # For unlimited dimensions we have to guess 1024
     shape = tuple((x if x!=0 else 1024) for i, x in enumerate(shape))
 
@@ -304,7 +300,7 @@ def guess_chunk(shape, maxshape, typesize):
         chunks[idx%ndims] = np.ceil(chunks[idx%ndims] / 2.0)
         idx += 1
 
-    return tuple(long(x) for x in chunks)
+    return tuple(int(x) for x in chunks)
 
 
 
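For reference, the filter pipeline managed by this module is requested at dataset-creation time; a sketch::

    import numpy as np
    import h5py

    with h5py.File('filters_demo.h5', 'w') as f:
        dset = f.create_dataset('compressed', (1000, 1000), dtype='f8',
                                compression='gzip', compression_opts=4,
                                shuffle=True, fletcher32=True)
        dset[...] = np.random.random((1000, 1000))
        print(dset.compression, dset.compression_opts)   # gzip 4
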
diff --git a/h5py/_hl/group.py b/h5py/_hl/group.py
index 4d469e2..e59246c 100644
--- a/h5py/_hl/group.py
+++ b/h5py/_hl/group.py
@@ -7,23 +7,25 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    Implements support for high-level access to HDF5 groups.
+"""
+
 from __future__ import absolute_import
 
 import posixpath as pp
-
 import six
-
 import numpy
-import collections
+import sys
 
 from .. import h5g, h5i, h5o, h5r, h5t, h5l, h5p
 from . import base
-from .base import HLObject, MutableMappingWithLock, phil, with_phil
+from .base import HLObject, MutableMappingHDF5, phil, with_phil
 from . import dataset
 from . import datatype
 
 
-class Group(HLObject, MutableMappingWithLock):
+class Group(HLObject, MutableMappingHDF5):
 
     """ Represents an HDF5 group.
     """
@@ -197,6 +199,8 @@ class Group(HLObject, MutableMappingWithLock):
         >>> if cls == SoftLink:
         ...     print '"foo" is a soft link!'
         """
+        # pylint: disable=arguments-differ
+
         with phil:
             if not (getclass or getlink):
                 try:
@@ -225,15 +229,20 @@ class Group(HLObject, MutableMappingWithLock):
                         return SoftLink
                     linkbytes = self.id.links.get_val(self._e(name))
                     return SoftLink(self._d(linkbytes))
+                    
                 elif typecode == h5l.TYPE_EXTERNAL:
                     if getclass:
                         return ExternalLink
                     filebytes, linkbytes = self.id.links.get_val(self._e(name))
-                    # TODO: I think this is wrong,
-                    # we should use filesystem decoding on the filename
-                    return ExternalLink(self._d(filebytes), self._d(linkbytes))
+                    try:
+                        filetext = filebytes.decode(sys.getfilesystemencoding())
+                    except (UnicodeError, LookupError):
+                        filetext = filebytes
+                    return ExternalLink(filetext, self._d(linkbytes))
+                    
                 elif typecode == h5l.TYPE_HARD:
                     return HardLink if getclass else HardLink()
+                    
                 else:
                     raise TypeError("Unknown link type")
 
@@ -276,7 +285,7 @@ class Group(HLObject, MutableMappingWithLock):
                           self._e(obj.path), lcpl=lcpl, lapl=self._lapl)
 
         elif isinstance(obj, numpy.dtype):
-            htype = h5t.py_create(obj)
+            htype = h5t.py_create(obj, logical=True)
             htype.commit(self.id, name, lcpl=lcpl)
 
         else:
@@ -419,6 +428,7 @@ class Group(HLObject, MutableMappingWithLock):
         """
         with phil:
             def proxy(name):
+                """ Call the function with the text name, not bytes """
                 return func(self._d(name))
             return h5o.visit(self.id, proxy)
 
@@ -448,6 +458,7 @@ class Group(HLObject, MutableMappingWithLock):
         """
         with phil:
             def proxy(name):
+                """ Use the text name of the object, not bytes """
                 name = self._d(name)
                 return func(name, self[name])
             return h5o.visit(self.id, proxy)
@@ -477,7 +488,6 @@ class HardLink(object):
     pass
 
 
-#TODO: implement equality testing for these
 class SoftLink(object):
 
     """
@@ -488,6 +498,7 @@ class SoftLink(object):
 
     @property
     def path(self):
+        """ Soft link value.  Not guaranteed to be a valid path. """
         return self._path
 
     def __init__(self, path):
@@ -506,10 +517,12 @@ class ExternalLink(object):
 
     @property
     def path(self):
+        """ Soft link path, i.e. the part inside the HDF5 file. """
         return self._path
 
     @property
     def filename(self):
+        """ Path to the external HDF5 file in the filesystem. """
         return self._filename
 
     def __init__(self, filename, path):
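
The link classes above round-trip through Group.get() and item assignment.
A minimal sketch of the workflow (file names are placeholders):

    import h5py

    with h5py.File('demo.h5', 'w') as f:
        f.create_group('real')
        f['soft'] = h5py.SoftLink('/real')                 # stored by path
        f['ext'] = h5py.ExternalLink('other.h5', '/data')  # cross-file link

        # Inspect the links themselves rather than dereferencing them
        print(f.get('soft', getlink=True).path)            # '/real'
        lnk = f.get('ext', getlink=True)
        print(lnk.filename, lnk.path)                      # 'other.h5' '/data'
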
diff --git a/h5py/_hl/selections.py b/h5py/_hl/selections.py
index 8f93524..614b236 100644
--- a/h5py/_hl/selections.py
+++ b/h5py/_hl/selections.py
@@ -7,6 +7,9 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+# We use __getitem__ side effects, which pylint doesn't like.
+# pylint: disable=pointless-statement
+
 """
     High-level access to HDF5 dataspace selections
 """
@@ -14,22 +17,12 @@
 from __future__ import absolute_import
 
 import six
-from six.moves import xrange
+from six.moves import xrange    # pylint: disable=redefined-builtin
 
 import numpy as np
 
 from .. import h5s, h5r
 
-# Selection types for hyperslabs
-from ..h5s import SELECT_SET  as SET
-from ..h5s import SELECT_OR   as OR
-from ..h5s import SELECT_AND  as AND
-from ..h5s import SELECT_XOR  as XOR
-from ..h5s import SELECT_NOTB as NOTB
-from ..h5s import SELECT_NOTA as NOTA
-
-if six.PY3:
-    long = int
 
 def select(shape, args, dsid):
     """ High-level routine to generate a selection from arbitrary arguments
@@ -119,7 +112,7 @@ class _RegionProxy(object):
         """ Takes arbitrary selection terms and produces a RegionReference
         object.  Selection must be compatible with the dataset.
         """
-        selection = select(self.id.shape, args)
+        selection = select(self.id.shape, args, self.id)
         return h5r.create(self.id, '.', h5r.DATASET_REGION, selection.id)
 
 class Selection(object):
@@ -198,7 +191,7 @@ class PointSelection(Selection):
     """
 
     def _perform_selection(self, points, op):
-
+        """ Internal method which actually performs the selection """
         points = np.asarray(points, order='C', dtype='u8')
         if len(points.shape) == 1:
             points.shape = (1,points.shape[0])
@@ -306,7 +299,7 @@ class SimpleSelection(Selection):
         tshape = tuple(tshape)
 
         chunks = tuple(x//y for x, y in zip(count, tshape))
-        nchunks = long(np.product(chunks))
+        nchunks = int(np.product(chunks))
 
         if nchunks == 1:
             yield self._id
@@ -319,71 +312,6 @@ class SimpleSelection(Selection):
                 yield sid
 
 
-class HyperSelection(Selection):
-
-    """
-        Represents multiple overlapping rectangular selections, combined
-        with set-like operators.  Result is a 1D shape, as with boolean array
-        selection.  Broadcasting is not supported for these selections.
-
-        When created, the entire dataspace is selected.  To make
-        adjustments to the selection, use the standard NumPy slicing
-        syntax, either via __getitem__ (as with simple selections) or via
-        __setitem__ and one of the supported operators:
-
-            >>> sel = HyperSelection((10,20))  # Initially 200 points
-            >>> sel[:,5:15] = False            # Now 100 points
-            >>> sel[:,10]   = True             # Now 110 points
-            >>> sel[...]    = XOR              # Now 90 points
-
-        Legal operators (in the h5py.selections module) are:
-           
-        SET
-            New selection, wiping out any old one
-       
-        AND, XOR, OR (or True)
-            Logical AND/XOR/OR between new and old selection
-
-        NOTA
-            Select only regions in new selection which don't intersect the old
-
-        NOTB (or False)
-            Select only regions in old selection which don't intersect the new
- 
-    """
-
-    def __getitem__(self, args):
-        self[args] = SET
-        return self
-
-    def __setitem__(self, args, op):
-
-        if not isinstance(args, tuple):
-            args = (args,)
- 
-        start, count, step, scalar = _handle_simple(self.shape, args)
-
-        if not op in (SET, OR, AND, XOR, NOTB, NOTA, True, False):
-            raise ValueError("Illegal selection operator")
-
-        if op is True:
-            op = OR
-        elif op is False:
-            op = NOTB
-
-        seltype = self._id.get_select_type()
-
-        if seltype == h5s.SEL_ALL:
-            self._id.select_hyperslab((0,)*len(self.shape), self.shape, op=h5s.SELECT_SET)
-       
-        elif seltype == h5s.SEL_NONE:
-            if op in (SET, OR, XOR, NOTA):
-                op = SET
-            else:
-                return
-
-        self._id.select_hyperslab(start, count, step, op=op)
-
 class FancySelection(Selection):
 
     """
@@ -429,10 +357,8 @@ class FancySelection(Selection):
                         raise TypeError("Indexing elements must be in increasing order")
 
         if len(sequenceargs) > 1:
-            # TODO: fix this with broadcasting
             raise TypeError("Only one indexing vector or array is currently allowed for advanced selection")
         if len(sequenceargs) == 0:
-            # TODO: fallback to standard selection
             raise TypeError("Advanced selection inappropriate")
 
         vectorlength = len(list(sequenceargs.values())[0])
@@ -484,7 +410,7 @@ def _expand_ellipsis(args, rank):
 
     final_args = []
     n_args = len(args)
-    for idx, arg in enumerate(args):
+    for arg in args:
 
         if arg is Ellipsis:
             final_args.extend( (slice(None,None,None),)*(rank-n_args+1) )
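
The one-line fix to _RegionProxy above passes the dataset ID through to
select(), which is what backs region references in the high-level API.  A
short sketch (file name is a placeholder):

    import numpy as np
    import h5py

    with h5py.File('regions.h5', 'w') as f:
        dset = f.create_dataset('x', data=np.arange(100).reshape(10, 10))
        ref = dset.regionref[2:5, 2:5]   # builds a selection via select()
        print(f[ref])                    # dereferences to the dataset
        print(dset[ref])                 # reads only the referenced region
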
diff --git a/h5py/_hl/selections2.py b/h5py/_hl/selections2.py
index d0e1b54..8d04e61 100644
--- a/h5py/_hl/selections2.py
+++ b/h5py/_hl/selections2.py
@@ -7,6 +7,10 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    Implements a portion of the selection operations.
+"""
+
 from __future__ import absolute_import
 
 import numpy as np
@@ -26,7 +30,7 @@ def read_dtypes(dataset_dtype, names):
         raise ValueError("Field names only allowed for compound types")
 
     elif any(x not in dataset_dtype.names for x in names):
-        raise ValueError("Field %s does not appear in this type." % name)
+        raise ValueError("Field does not appear in this type.")
 
     else:
         format_dtype = np.dtype([(name, dataset_dtype.fields[name][0]) for name in names])
@@ -71,6 +75,10 @@ def read_selections_scalar(dsid, args):
 
 class ScalarReadSelection(object):
 
+    """
+        Implements slicing for scalar datasets.
+    """
+    
     def __init__(self, fspace, args):
         if args == ():
             self.mshape = None
@@ -87,7 +95,10 @@ class ScalarReadSelection(object):
         yield self.fspace, self.mspace        
 
 def select_read(fspace, args):
+    """ Top-level dispatch function for reading.
     
+    At the moment, only supports reading from scalar datasets.
+    """
     if fspace.shape == ():
         return ScalarReadSelection(fspace, args)
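
ScalarReadSelection backs the two scalar indexing forms in the high-level
API; for example:

    import numpy as np
    import h5py

    with h5py.File('scalar.h5', 'w') as f:
        dset = f.create_dataset('x', data=np.float32(42.5))
        print(dset[()])    # the element itself
        print(dset[...])   # a 0-d ndarray wrapping the element
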
 
diff --git a/h5py/_objects.pyx b/h5py/_objects.pyx
index 6c42d43..438572c 100644
--- a/h5py/_objects.pyx
+++ b/h5py/_objects.pyx
@@ -212,6 +212,13 @@ cdef class ObjectID:
                 self.id = 0
 
 
+    def close(self):
+        """ Close this identifier. """
+        # Note this is the default close method.  Subclasses, e.g. FileID,
+        # which have nonlocal effects should override this.
+        self._close()
+
+
     def __nonzero__(self):
         return self.valid
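
The new close() method gives explicit control over low-level identifiers,
complementing the existing reference-count-based cleanup.  For instance:

    import h5py

    space = h5py.h5s.create_simple((10,))
    assert bool(space)   # __nonzero__ reports a valid identifier
    space.close()        # release the identifier immediately
    assert not bool(space)
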
 
diff --git a/h5py/api_functions.txt b/h5py/api_functions.txt
index 96579e4..06e8eea 100644
--- a/h5py/api_functions.txt
+++ b/h5py/api_functions.txt
@@ -32,7 +32,7 @@
 hdf5:
 
   # === H5 - General library functions ========================================
-  
+
   herr_t    H5open()
   herr_t    H5close()
   herr_t    H5get_libversion(unsigned *majnum, unsigned *minnum, unsigned *relnum)
@@ -60,9 +60,9 @@ hdf5:
 
   hid_t     H5Acreate_by_name(hid_t loc_id, char *obj_name, char *attr_name, hid_t type_id, hid_t space_id, hid_t acpl_id, hid_t aapl_id, hid_t lapl_id)
 
-  herr_t    H5Aopen(hid_t obj_id, char *attr_name, hid_t aapl_id)
-  herr_t    H5Aopen_by_name( hid_t loc_id, char *obj_name, char *attr_name, hid_t aapl_id, hid_t lapl_id)
-  herr_t    H5Aopen_by_idx(hid_t loc_id, char *obj_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, hid_t aapl_id, hid_t lapl_id)
+  hid_t     H5Aopen(hid_t obj_id, char *attr_name, hid_t aapl_id)
+  hid_t     H5Aopen_by_name( hid_t loc_id, char *obj_name, char *attr_name, hid_t aapl_id, hid_t lapl_id)
+  hid_t     H5Aopen_by_idx(hid_t loc_id, char *obj_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, hid_t aapl_id, hid_t lapl_id)
   htri_t    H5Aexists_by_name( hid_t loc_id, char *obj_name, char *attr_name, hid_t lapl_id)
   htri_t    H5Aexists(hid_t obj_id, char *attr_name)
 
@@ -78,7 +78,7 @@ hdf5:
 
 
   # === H5D - Dataset API =====================================================
-  
+
   hid_t     H5Dcreate2(hid_t loc_id, char *name, hid_t type_id, hid_t space_id, hid_t lcpl_id, hid_t dcpl_id, hid_t dapl_id)
   hid_t     H5Dcreate_anon(hid_t file_id, hid_t type_id, hid_t space_id, hid_t plist_id, hid_t dapl_id)
 
@@ -106,12 +106,18 @@ hdf5:
 
   herr_t    H5Diterate(void *buf, hid_t type_id, hid_t space_id,  H5D_operator_t op, void* operator_data)
   herr_t    H5Dset_extent(hid_t dset_id, hsize_t* size)
-  
+
   # SWMR functions
   1.9.178   herr_t H5Dflush(hid_t dataset_id)
   1.9.178   herr_t H5Drefresh(hid_t dataset_id)
-  
-  
+
+
+  # === H5E - Minimal error-handling interface ================================
+
+  # This is used only for defs.py, to disable printing in new threads
+  herr_t H5Eset_auto(void* a, void* b)
+
+
   # === H5F - File API ========================================================
 
   hid_t     H5Fcreate(char *filename, unsigned int flags, hid_t create_plist, hid_t access_plist)
@@ -142,7 +148,7 @@ hdf5:
   # MPI functions
   MPI 1.8.9 herr_t H5Fset_mpi_atomicity(hid_t file_id, hbool_t flag)
   MPI 1.8.9 herr_t H5Fget_mpi_atomicity(hid_t file_id, hbool_t *flag)
-  
+
   # SWMR functions
   1.9.178   herr_t H5Fstart_swmr_write(hid_t file_id)
 
@@ -243,7 +249,7 @@ hdf5:
   # General operations
   hid_t     H5Pcreate(hid_t plist_id)
   hid_t     H5Pcopy(hid_t plist_id)
-  int       H5Pget_class(hid_t plist_id)
+  hid_t     H5Pget_class(hid_t plist_id)
   herr_t    H5Pclose(hid_t plist_id)
   htri_t    H5Pequal( hid_t id1, hid_t id2 )
   herr_t    H5Pclose_class(hid_t id)
@@ -302,12 +308,25 @@ hdf5:
   herr_t        H5Pset_shuffle(hid_t plist_id)
   herr_t        H5Pset_szip(hid_t plist, unsigned int options_mask, unsigned int pixels_per_block)
   herr_t        H5Pset_scaleoffset(hid_t plist, H5Z_SO_scale_type_t scale_type, int scale_factor)
+  1.9.233 ssize_t H5Pget_virtual_dsetname(hid_t dcpl_id, size_t index, char *name, size_t size)
+  1.9.233 ssize_t H5Pget_virtual_filename(hid_t dcpl_id, size_t index, char *name, size_t size)
+  1.9.233 herr_t H5Pget_virtual_count(hid_t dcpl_id, size_t *count)
+  1.9.233 herr_t H5Pset_virtual(hid_t dcpl_id, hid_t vspace_id, const char *src_file_name, const char *src_dset_name, hid_t src_space_id)
+  1.9.233 hid_t H5Pget_virtual_vspace(hid_t dcpl_id, size_t index)
+  1.9.233 hid_t H5Pget_virtual_srcspace(hid_t dcpl_id, size_t index)
 
   # Dataset access
   herr_t    H5Pset_edc_check(hid_t plist, H5Z_EDC_t check)
   H5Z_EDC_t H5Pget_edc_check(hid_t plist)
   herr_t    H5Pset_chunk_cache( hid_t dapl_id, size_t rdcc_nslots, size_t rdcc_nbytes, double rdcc_w0 )
   herr_t    H5Pget_chunk_cache( hid_t dapl_id, size_t *rdcc_nslots, size_t *rdcc_nbytes, double *rdcc_w0 )
+  1.9.233 herr_t H5Pset_virtual_view(hid_t plist_id, H5D_vds_view_t view)
+  1.9.233 herr_t H5Pget_virtual_view(hid_t plist_id, H5D_vds_view_t *view)
+  1.9.233 herr_t H5Pset_virtual_printf_gap(hid_t plist_id, hsize_t gap_size)
+  1.9.233 herr_t H5Pget_virtual_printf_gap(hid_t plist_id, hsize_t *gap_size)
+
+  MPI herr_t H5Pset_dxpl_mpio( hid_t dxpl_id, H5FD_mpio_xfer_t xfer_mode )
+  MPI herr_t H5Pget_dxpl_mpio( hid_t dxpl_id, H5FD_mpio_xfer_t* xfer_mode )
 
   # Other properties
   herr_t    H5Pset_sieve_buf_size(hid_t fapl_id, size_t size)
@@ -347,7 +366,7 @@ hdf5:
   herr_t    H5Pset_alignment(hid_t plist_id, hsize_t threshold, hsize_t alignment)
   herr_t    H5Pget_alignment(hid_t plist_id, hsize_t *threshold, hsize_t *alignment)
 
-  # MPI functions  
+  # MPI functions
   MPI herr_t H5Pset_fapl_mpio(hid_t fapl_id, MPI_Comm comm, MPI_Info info)
   MPI herr_t H5Pget_fapl_mpio(hid_t fapl_id, MPI_Comm *comm, MPI_Info *info)
 
@@ -403,6 +422,9 @@ hdf5:
   herr_t    H5Sencode(hid_t obj_id, void *buf, size_t *nalloc)
   hid_t     H5Sdecode(void *buf)
 
+  1.9.233 htri_t H5Sis_regular_hyperslab(hid_t spaceid)
+  1.9.233 htri_t H5Sget_regular_hyperslab(hid_t spaceid, hsize_t* start, hsize_t* stride, hsize_t* count, hsize_t* block)
+
 
   # === H5T - Datatypes =========================================================
 
@@ -511,7 +533,7 @@ hdf5_hl:
 
   ERROR herr_t  H5DSset_label(hid_t did, unsigned int idx, char *label)
   ERROR ssize_t H5DSget_label(hid_t did, unsigned int idx, char *label, size_t size)
-  
+
   ERROR ssize_t H5DSget_scale_name(hid_t did, char *name, size_t size)
   ERROR htri_t  H5DSis_scale(hid_t did)
   ERROR herr_t  H5DSiterate_scales(hid_t did, unsigned int dim, int *idx, H5DS_iterate_t visitor, void *visitor_data)
diff --git a/h5py/api_types_hdf5.pxd b/h5py/api_types_hdf5.pxd
index 250d91e..a21b682 100644
--- a/h5py/api_types_hdf5.pxd
+++ b/h5py/api_types_hdf5.pxd
@@ -9,9 +9,11 @@
 
 from api_types_ext cimport *
 
+include "config.pxi"
+
 cdef extern from "hdf5.h":
   # Basic types
-  ctypedef int hid_t
+  ctypedef long int hid_t
   ctypedef int hbool_t
   ctypedef int herr_t
   ctypedef int htri_t
@@ -42,11 +44,18 @@ cdef extern from "hdf5.h":
 # === H5D - Dataset API =======================================================
 
   ctypedef enum H5D_layout_t:
-    H5D_LAYOUT_ERROR    = -1,
-    H5D_COMPACT         = 0,
-    H5D_CONTIGUOUS      = 1,
-    H5D_CHUNKED         = 2,
-    H5D_NLAYOUTS        = 3
+      H5D_LAYOUT_ERROR    = -1,
+      H5D_COMPACT         = 0,
+      H5D_CONTIGUOUS      = 1,
+      H5D_CHUNKED         = 2,
+      H5D_VIRTUAL         = 3,  # New in 1.10
+      H5D_NLAYOUTS        = 4
+
+  IF HDF5_VERSION >= VDS_MIN_HDF5_VERSION:
+    ctypedef enum H5D_vds_view_t:
+        H5D_VDS_ERROR           = -1,
+        H5D_VDS_FIRST_MISSING   = 0,
+        H5D_VDS_LAST_AVAILABLE  = 1
 
   ctypedef enum H5D_alloc_time_t:
     H5D_ALLOC_TIME_ERROR    =-1,
@@ -168,6 +177,10 @@ cdef extern from "hdf5.h":
   # Flag for tracking allocation of space in file
   int H5FD_LOG_ALLOC      # 0x4000
   int H5FD_LOG_ALL        # (H5FD_LOG_ALLOC|H5FD_LOG_TIME_IO|H5FD_LOG_NUM_IO|H5FD_LOG_FLAVOR|H5FD_LOG_FILE_IO|H5FD_LOG_LOC_IO)
+  IF MPI:
+    ctypedef enum H5FD_mpio_xfer_t:
+        H5FD_MPIO_INDEPENDENT = 0,
+        H5FD_MPIO_COLLECTIVE
 
 # === H5G - Groups API ========================================================
 
@@ -208,6 +221,7 @@ cdef extern from "hdf5.h":
 # === H5I - Identifier and reflection interface ===============================
 
   ctypedef enum H5I_type_t:
+    H5I_UNINIT       = -2,  # uninitialized Group
     H5I_BADID        = -1,  # invalid Group
     H5I_FILE        = 1,    # group ID for File objects
     H5I_GROUP,              # group ID for Group objects
@@ -219,7 +233,10 @@ cdef extern from "hdf5.h":
     H5I_VFL,                # group ID for virtual file layer
     H5I_GENPROP_CLS,        # group ID for generic property list classes
     H5I_GENPROP_LST,        # group ID for generic property lists
-    H5I_NGROUPS             # number of valid groups, MUST BE LAST!
+    H5I_ERROR_CLASS,        # group ID for error classes
+    H5I_ERROR_MSG,          # group ID for error messages
+    H5I_ERROR_STACK,        # group ID for error stacks
+    H5I_NTYPES              # number of valid groups, MUST BE LAST!
 
 # === H5L/H5O - Links interface (1.8.X only) ======================================
 
diff --git a/h5py/h5.pyx b/h5py/h5.pyx
index 48131a7..bb4ecdb 100644
--- a/h5py/h5.pyx
+++ b/h5py/h5.pyx
@@ -16,8 +16,8 @@ ITER_INC    = H5_ITER_INC     # Increasing order
 ITER_DEC    = H5_ITER_DEC     # Decreasing order
 ITER_NATIVE = H5_ITER_NATIVE  # No particular order, whatever is fastest
 
-INDEX_NAME      = H5_INDEX_NAME       # Index on names      
-INDEX_CRT_ORDER = H5_INDEX_CRT_ORDER  # Index on creation order    
+INDEX_NAME      = H5_INDEX_NAME       # Index on names
+INDEX_CRT_ORDER = H5_INDEX_CRT_ORDER  # Index on creation order
 
 class ByteStringContext(object):
 
@@ -114,7 +114,7 @@ cdef class H5PYConfig:
     property read_byte_strings:
         """ Returns a context manager which forces all strings to be returned
         as byte strings. """
-        
+
         def __get__(self):
             with phil:
                 return self._bytestrings
@@ -126,12 +126,17 @@ cdef class H5PYConfig:
                 return True
             ELSE:
                 return False
-                
+
     property swmr_min_hdf5_version:
         """ Tuple indicating the minimum HDF5 version required for SWMR features"""
         def __get__(self):
             return SWMR_MIN_HDF5_VERSION
 
+    property vds_min_hdf5_version:
+        """Tuple indicating the minimum HDF5 version required for virtual dataset (VDS) features"""
+        def __get__(self):
+            return VDS_MIN_HDF5_VERSION
+
 cdef H5PYConfig cfg = H5PYConfig()
 
 cpdef H5PYConfig get_config():
@@ -151,7 +156,7 @@ def get_libversion():
     cdef unsigned int minor
     cdef unsigned int release
     cdef herr_t retval
-    
+
     H5get_libversion(&major, &minor, &release)
 
     return (major, minor, release)
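
Like swmr_min_hdf5_version, the new vds_min_hdf5_version property supports
runtime feature detection:

    import h5py

    cfg = h5py.get_config()
    if h5py.version.hdf5_version_tuple >= cfg.vds_min_hdf5_version:
        print("virtual dataset (VDS) API is available")
    if cfg.mpi:
        print("h5py was built in MPI mode")
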
diff --git a/h5py/h5d.pyx b/h5py/h5d.pyx
index 032fb0f..5be3279 100644
--- a/h5py/h5d.pyx
+++ b/h5py/h5d.pyx
@@ -51,6 +51,10 @@ FILL_VALUE_UNDEFINED    = H5D_FILL_VALUE_UNDEFINED
 FILL_VALUE_DEFAULT      = H5D_FILL_VALUE_DEFAULT
 FILL_VALUE_USER_DEFINED = H5D_FILL_VALUE_USER_DEFINED
 
+IF HDF5_VERSION >= VDS_MIN_HDF5_VERSION:
+    VIRTUAL = H5D_VIRTUAL
+    VDS_FIRST_MISSING   = H5D_VDS_FIRST_MISSING
+    VDS_LAST_AVAILABLE  = H5D_VDS_LAST_AVAILABLE
 
 # === Dataset operations ======================================================
 
@@ -353,41 +357,41 @@ cdef class DatasetID(ObjectID):
             may even be zero.
         """
         return H5Dget_storage_size(self.id)
-        
+
     IF HDF5_VERSION >= SWMR_MIN_HDF5_VERSION:
 
         @with_phil
         def flush(self):
             """ no return
-            
+
             Flushes all buffers associated with a dataset to disk.
-            
-            This function causes all buffers associated with a dataset to be 
+
+            This function causes all buffers associated with a dataset to be
             immediately flushed to disk without removing the data from the cache.
-            
+
             Use this in SWMR write mode to allow readers to be updated with the
             dataset changes.
-            
+
             Feature requires: 1.9.178 HDF5
-            """ 
+            """
             H5Dflush(self.id)
 
         @with_phil
         def refresh(self):
             """ no return
-            
-            Refreshes all buffers associated with a dataset. 
-            
+
+            Refreshes all buffers associated with a dataset.
+
             This function causes all buffers associated with a dataset to be
             cleared and immediately re-loaded with updated contents from disk.
-            
+
             This function essentially closes the dataset, evicts all metadata
             associated with it from the cache, and then re-opens the dataset.
-            The reopened dataset is automatically re-registered with the same ID. 
-            
+            The reopened dataset is automatically re-registered with the same ID.
+
             Use this in SWMR read mode to poll for dataset changes.
-            
+
             Feature requires: 1.9.178 HDF5
-            """ 
+            """
             H5Drefresh(self.id)
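
flush() and refresh() are the two halves of the SWMR workflow: the writer
flushes, the reader refreshes.  A sketch of the typical pattern (the writer
and reader would normally be separate processes; the file name is a
placeholder):

    import h5py

    # Writer
    f = h5py.File('swmr.h5', 'w', libver='latest')
    dset = f.create_dataset('x', (0,), maxshape=(None,), dtype='f4')
    f.swmr_mode = True
    dset.resize((100,))
    dset.id.flush()        # H5Dflush: publish the new data to readers

    # Reader (normally a separate process)
    r = h5py.File('swmr.h5', 'r', swmr=True, libver='latest')
    rdset = r['x']
    rdset.id.refresh()     # H5Drefresh: pick up the writer's changes
    print(rdset.shape)
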
 
diff --git a/h5py/h5fd.pxd b/h5py/h5fd.pxd
index 37d64ba..00a7b8d 100644
--- a/h5py/h5fd.pxd
+++ b/h5py/h5fd.pxd
@@ -39,7 +39,8 @@ cdef extern from "hdf5.h":
   hid_t H5FD_STDIO
   IF UNAME_SYSNAME == "Windows":
     hid_t H5FD_WINDOWS
-
+  hid_t H5FD_MPIO_COLLECTIVE
+  hid_t H5FD_MPIO_INDEPENDENT
   int H5FD_LOG_LOC_READ   # 0x0001
   int H5FD_LOG_LOC_WRITE  # 0x0002
   int H5FD_LOG_LOC_SEEK   # 0x0004
diff --git a/h5py/h5fd.pyx b/h5py/h5fd.pyx
index 735dc27..2b6f79d 100644
--- a/h5py/h5fd.pyx
+++ b/h5py/h5fd.pyx
@@ -25,6 +25,11 @@ MEM_LHEAP = H5FD_MEM_LHEAP
 MEM_OHDR = H5FD_MEM_OHDR
 MEM_NTYPES = H5FD_MEM_NTYPES
 
+# === MPI driver ==============================================================
+
+MPIO_INDEPENDENT = H5FD_MPIO_INDEPENDENT
+MPIO_COLLECTIVE = H5FD_MPIO_COLLECTIVE
+
 # === Driver types ============================================================
 
 CORE = H5FD_CORE
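
MPIO_INDEPENDENT and MPIO_COLLECTIVE are consumed by the new
PropDXID.set_dxpl_mpio() further down.  In the high-level API this surfaces
as the collective context manager (see examples/collective_io.py); a sketch,
to be run under mpiexec with an MPI-enabled build:

    from mpi4py import MPI
    import h5py

    comm = MPI.COMM_WORLD
    f = h5py.File('parallel.h5', 'w', driver='mpio', comm=comm)
    dset = f.create_dataset('x', (comm.size, 10), dtype='f4')

    with dset.collective:              # collective transfer mode
        dset[comm.rank, :] = comm.rank

    f.close()
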
diff --git a/h5py/h5p.pyx b/h5py/h5p.pyx
index da175dd..b8b0f2b 100644
--- a/h5py/h5p.pyx
+++ b/h5py/h5p.pyx
@@ -19,13 +19,18 @@ from utils cimport  require_tuple, convert_dims, convert_tuple, \
                     check_numpy_write, check_numpy_read
 from numpy cimport ndarray, import_array
 from h5t cimport TypeID, py_create
+from h5s cimport SpaceID
 from h5ac cimport CacheConfig
 from h5py import _objects
 
 from ._objects import phil, with_phil
 
 if MPI:
-    from mpi4py.mpi_c cimport MPI_Comm, MPI_Info, MPI_Comm_dup, MPI_Info_dup, \
+    if MPI4PY_V2:
+        from mpi4py.libmpi cimport MPI_Comm, MPI_Info, MPI_Comm_dup, MPI_Info_dup, \
+                               MPI_Comm_free, MPI_Info_free
+    else:
+        from mpi4py.mpi_c cimport MPI_Comm, MPI_Info, MPI_Comm_dup, MPI_Info_dup, \
                                MPI_Comm_free, MPI_Info_free
 
 # Initialization
@@ -371,7 +376,6 @@ cdef class PropDCID(PropOCID):
         Dataset creation property list.
     """
 
-
     @with_phil
     def set_layout(self, int layout_code):
         """(INT layout_code)
@@ -381,8 +385,9 @@ cdef class PropDCID(PropOCID):
         - h5d.COMPACT
         - h5d.CONTIGUOUS
         - h5d.CHUNKED
+        - h5d.VIRTUAL (If using HDF5 library version 1.10 or later)
         """
-        H5Pset_layout(self.id, layout_code)
+        H5Pset_layout(self.id, <H5D_layout_t>layout_code)
 
 
     @with_phil
@@ -394,10 +399,10 @@ cdef class PropDCID(PropOCID):
         - h5d.COMPACT
         - h5d.CONTIGUOUS
         - h5d.CHUNKED
+        - h5d.VIRTUAL (If using HDF5 library version 1.10 or later)
         """
         return <int>H5Pget_layout(self.id)
 
-
     @with_phil
     def set_chunk(self, object chunksize):
         """(TUPLE chunksize)
@@ -750,6 +755,91 @@ cdef class PropDCID(PropOCID):
         fail.'''
         H5Pset_scaleoffset(self.id, scale_type, scale_factor)
 
+    # === Virtual dataset functions ===========================================
+
+    IF HDF5_VERSION >= VDS_MIN_HDF5_VERSION:
+
+        @with_phil
+        def set_virtual(self, SpaceID vspace not None, char* src_file_name,
+                        char* src_dset_name, SpaceID src_space not None):
+            """(SpaceID vspace, STR src_file_name, STR src_dset_name, SpaceID src_space)
+
+            Set the mapping between virtual and source datasets.
+
+            The virtual dataset is described by its virtual dataspace
+            (vspace), which selects the elements to be mapped. The source
+            dataset is described by the name of the file where it is located
+            (src_file_name), the name of the dataset (src_dset_name) and its
+            dataspace (src_space).
+            """
+            H5Pset_virtual(self.id, vspace.id, src_file_name, src_dset_name, src_space.id)
+
+        @with_phil
+        def get_virtual_count(self):
+            """() => UINT
+
+            Get the number of mappings for the virtual dataset.
+            """
+            cdef size_t count
+            H5Pget_virtual_count(self.id, &count)
+            return count
+
+        @with_phil
+        def get_virtual_dsetname(self, size_t index=0):
+            """(UINT index=0) => STR
+
+            Get the name of a source dataset used in the mapping of the virtual
+            dataset at the position index.
+            """
+            cdef char* name = NULL
+            cdef ssize_t size
+
+            size = H5Pget_virtual_dsetname(self.id, index, NULL, 0)
+            name = <char*>emalloc(size+1)
+            try:
+                H5Pget_virtual_dsetname(self.id, index, name, <size_t>size+1)
+                src_dset_name = name
+            finally:
+                efree(name)
+
+            return src_dset_name
+
+        @with_phil
+        def get_virtual_filename(self, size_t index=0):
+            """(UINT index=0) => STR
+
+            Get the file name of a source dataset used in the mapping of the
+            virtual dataset at the position index.
+            """
+            cdef char* name = NULL
+            cdef ssize_t size
+
+            size = H5Pget_virtual_filename(self.id, index, NULL, 0)
+            name = <char*>emalloc(size+1)
+            try:
+                H5Pget_virtual_filename(self.id, index, name, <size_t>size+1)
+                src_fname = name
+            finally:
+                efree(name)
+
+            return src_fname
+
+        @with_phil
+        def get_virtual_vspace(self, size_t index=0):
+            """(UINT index=0) => SpaceID
+
+            Get a dataspace for the selection within the virtual dataset used
+            in the mapping.
+            """
+            return SpaceID(H5Pget_virtual_vspace(self.id, index))
+
+        @with_phil
+        def get_virtual_srcspace(self, size_t index=0):
+            """(UINT index=0) => SpaceID
+
+            Get a dataspace for the selection within the source dataset used
+            in the mapping.
+            """
+            return SpaceID(H5Pget_virtual_srcspace(self.id, index))
 
 # File access
 cdef class PropFAID(PropInstanceID):
@@ -1001,7 +1091,7 @@ cdef class PropFAID(PropInstanceID):
             Comm: An mpi4py.MPI.Comm instance
             Info: An mpi4py.MPI.Info instance
             """
-            H5Pset_fapl_mpio(self.id, comm.ob_mpi, info.ob_mpi) 
+            H5Pset_fapl_mpio(self.id, comm.ob_mpi, info.ob_mpi)
 
 
         @with_phil
@@ -1299,3 +1389,92 @@ cdef class PropDAID(PropInstanceID):
 
         H5Pget_chunk_cache(self.id, &rdcc_nslots, &rdcc_nbytes, &rdcc_w0 )
         return (rdcc_nslots,rdcc_nbytes,rdcc_w0)
+
+    # === Virtual dataset functions ===========================================
+    IF HDF5_VERSION >= VDS_MIN_HDF5_VERSION:
+
+        @with_phil
+        def set_virtual_view(self, unsigned int view):
+            """(UINT view)
+
+            Set the view of the virtual dataset (VDS) to include or exclude
+            missing mapped elements.
+
+            If view is set to h5d.VDS_FIRST_MISSING, the view includes all data
+            before the first missing mapped data. This setting provides a view
+            containing only the continuous data starting with the dataset’s
+            first data element. Any break in continuity terminates the view.
+
+            If view is set to h5d.VDS_LAST_AVAILABLE, the view includes all
+            available mapped data.
+
+            Missing mapped data is filled with the fill value set in the
+            virtual dataset's creation property list.
+            """
+            H5Pset_virtual_view(self.id, <H5D_vds_view_t>view)
+
+        @with_phil
+        def get_virtual_view(self):
+            """() => UINT view
+
+            Retrieve the view of the virtual dataset.
+
+            Valid values are:
+
+            - h5d.VDS_FIRST_MISSING
+            - h5d.VDS_LAST_AVAILABLE
+            """
+            cdef H5D_vds_view_t view
+            H5Pget_virtual_view(self.id, &view)
+            return <unsigned int>view
+
+        @with_phil
+        def set_virtual_printf_gap(self, hsize_t gap_size=0):
+            """(LONG gap_size=0)
+
+            Set the maximum number of missing source files and/or datasets
+            with the printf-style names when getting the extent of an unlimited
+            virtual dataset.
+
+            Instructs the library to stop looking for mapped data stored in
+            files and/or datasets with printf-style names once gap_size
+            consecutive files and/or datasets are missing. The source files
+            and datasets actually found determine the extent of the unlimited
+            virtual dataset with the printf-style mappings. Default value: 0.
+            """
+            H5Pset_virtual_printf_gap(self.id, gap_size)
+
+        @with_phil
+        def get_virtual_printf_gap(self):
+            """() => LONG gap_size
+
+            Return the maximum number of missing source files and/or datasets
+            with the printf-style names when getting the extent for an
+            unlimited virtual dataset.
+            """
+            cdef hsize_t gap_size
+            H5Pget_virtual_printf_gap(self.id, &gap_size)
+            return gap_size
+
+cdef class PropDXID(PropInstanceID):
+
+    """ Data transfer property list """
+
+    IF MPI:
+        def set_dxpl_mpio(self, int xfer_mode):
+            """ Set the transfer mode for MPI I/O.
+            Must be one of:
+            - h5fd.MPIO_INDEPENDENT (default)
+            - h5fd.MPIO_COLLECTIVE
+            """
+            H5Pset_dxpl_mpio(self.id, <H5FD_mpio_xfer_t>xfer_mode)
+
+        def get_dxpl_mpio(self):
+            """ Get the current transfer mode for MPI I/O.
+            Will be one of:
+            - h5fd.MPIO_INDEPENDENT (default)
+            - h5fd.MPIO_COLLECTIVE
+            """
+            cdef H5FD_mpio_xfer_t mode
+            H5Pget_dxpl_mpio(self.id, &mode)
+            return <int>mode
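
Taken together, the VDS additions let a dataset creation property list
describe a virtual-to-source mapping.  A minimal low-level sketch, assuming
HDF5 >= 1.10 and a pre-existing source file 'src.h5' containing a 1-D
dataset '/data' (both names are placeholders):

    import h5py
    from h5py import h5d, h5p, h5s, h5t

    with h5py.File('vds.h5', 'w') as f:
        vspace = h5s.create_simple((100,))
        src_space = h5s.create_simple((100,))
        vspace.select_hyperslab((0,), (100,))   # map the whole extent

        dcpl = h5p.create(h5p.DATASET_CREATE)
        dcpl.set_layout(h5d.VIRTUAL)
        dcpl.set_virtual(vspace, b'src.h5', b'/data', src_space)

        h5d.create(f.id, b'vds', h5t.NATIVE_FLOAT, vspace, dcpl=dcpl)
        print(dcpl.get_virtual_count())       # 1 mapping
        print(dcpl.get_virtual_filename(0))   # name of the source file
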
diff --git a/h5py/h5s.pyx b/h5py/h5s.pyx
index fbf13cb..5ff9e5b 100644
--- a/h5py/h5s.pyx
+++ b/h5py/h5s.pyx
@@ -11,6 +11,8 @@
     Low-level interface to the "H5S" family of data-space functions.
 """
 
+include "config.pxi"
+
 # Pyrex compile-time imports
 from utils cimport  require_tuple, convert_dims, convert_tuple, \
                     emalloc, efree, create_numpy_hsize, create_hsize_array
@@ -47,7 +49,7 @@ UNLIMITED = H5S_UNLIMITED
 NO_CLASS = H5S_NO_CLASS
 SCALAR   = H5S_SCALAR
 SIMPLE   = H5S_SIMPLE
-_NULL = H5S_NULL
+globals()["NULL"] = H5S_NULL  # "NULL" is reserved in Cython
 
 #enum H5S_sel_type
 SEL_ERROR       = H5S_SEL_ERROR
@@ -572,8 +574,54 @@ cdef class SpaceID(ObjectID):
             efree(stride_array)
             efree(block_array)
 
+    # === Virtual dataset functions ===========================================
+
+    IF HDF5_VERSION >= VDS_MIN_HDF5_VERSION:
 
+        @with_phil
+        def is_regular_hyperslab(self):
+            """() => BOOL
 
+            Determine whether a hyperslab selection is regular.
+            """
+            return <bint>H5Sis_regular_hyperslab(self.id)
 
+        @with_phil
+        def get_regular_hyperslab(self):
+            """() => (TUPLE start, TUPLE stride, TUPLE count, TUPLE block)
 
+            Retrieve a regular hyperslab selection.
+            """
+            cdef int rank
+            cdef hsize_t* start_array = NULL
+            cdef hsize_t* count_array = NULL
+            cdef hsize_t* stride_array = NULL
+            cdef hsize_t* block_array = NULL
+            cdef list start = []
+            cdef list stride = []
+            cdef list count = []
+            cdef list block = []
+            cdef int i
 
+            rank = H5Sget_simple_extent_ndims(self.id)
+            try:
+                start_array = <hsize_t*>emalloc(sizeof(hsize_t)*rank)
+                stride_array = <hsize_t*>emalloc(sizeof(hsize_t)*rank)
+                count_array = <hsize_t*>emalloc(sizeof(hsize_t)*rank)
+                block_array = <hsize_t*>emalloc(sizeof(hsize_t)*rank)
+                H5Sget_regular_hyperslab(self.id, start_array, stride_array,
+                                         count_array, block_array)
+
+                for i in range(rank):
+                    start.append(start_array[i])
+                    stride.append(stride_array[i])
+                    count.append(count_array[i])
+                    block.append(block_array[i])
+
+                return (tuple(start), tuple(stride), tuple(count), tuple(block))
+
+            finally:
+                efree(start_array)
+                efree(stride_array)
+                efree(count_array)
+                efree(block_array)
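
A quick sketch of the new hyperslab introspection (requires HDF5 >= 1.10):

    from h5py import h5s

    sid = h5s.create_simple((10, 10))
    sid.select_hyperslab((0, 0), (5, 5), stride=(2, 2))
    if sid.is_regular_hyperslab():
        # start, stride, count, block as rank-length tuples
        print(sid.get_regular_hyperslab())
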
diff --git a/h5py/h5t.pyx b/h5py/h5t.pyx
index 879f1a7..8931592 100644
--- a/h5py/h5t.pyx
+++ b/h5py/h5t.pyx
@@ -1078,6 +1078,7 @@ cdef class TypeCompoundID(TypeCompositeID):
         cdef int nfields
         field_names = []
         field_types = []
+        field_offsets = []
         nfields = self.get_nmembers()
 
         # First step: read field names and their Numpy dtypes into 
@@ -1087,6 +1088,7 @@ cdef class TypeCompoundID(TypeCompositeID):
             name = self.get_member_name(i)
             field_names.append(name)
             field_types.append(tmp_type.py_dtype())
+            field_offsets.append(self.get_member_offset(i))
 
 
         # 1. Check if it should be converted to a complex number
@@ -1104,7 +1106,7 @@ cdef class TypeCompoundID(TypeCompositeID):
         else:
             if sys.version[0] == '3':
                 field_names = [x.decode('utf8') for x in field_names]
-            typeobj = dtype(list(zip(field_names, field_types)))
+            typeobj = dtype({'names': field_names, 'formats': field_types, 'offsets': field_offsets})
 
         return typeobj
 
@@ -1247,17 +1249,17 @@ cdef class TypeEnumID(TypeCompositeID):
 # of NumPy dtype into an HDF5 type object.  The result is guaranteed to be
 # transient and unlocked.
 
-cdef dict _float_le = {2: IEEE_F16LE.id, 4: H5T_IEEE_F32LE, 8: H5T_IEEE_F64LE}
-cdef dict _float_be = {2: IEEE_F16BE.id, 4: H5T_IEEE_F32BE, 8: H5T_IEEE_F64BE}
+cdef dict _float_le = {2: H5Tcopy(IEEE_F16LE.id), 4: H5Tcopy(H5T_IEEE_F32LE), 8: H5Tcopy(H5T_IEEE_F64LE)}
+cdef dict _float_be = {2: H5Tcopy(IEEE_F16BE.id), 4: H5Tcopy(H5T_IEEE_F32BE), 8: H5Tcopy(H5T_IEEE_F64BE)}
 cdef dict _float_nt = _float_le if ORDER_NATIVE == H5T_ORDER_LE else _float_be
 
-cdef dict _int_le = {1: H5T_STD_I8LE, 2: H5T_STD_I16LE, 4: H5T_STD_I32LE, 8: H5T_STD_I64LE}
-cdef dict _int_be = {1: H5T_STD_I8BE, 2: H5T_STD_I16BE, 4: H5T_STD_I32BE, 8: H5T_STD_I64BE}
-cdef dict _int_nt = {1: H5T_NATIVE_INT8, 2: H5T_NATIVE_INT16, 4: H5T_NATIVE_INT32, 8: H5T_NATIVE_INT64}
+cdef dict _int_le = {1: H5Tcopy(H5T_STD_I8LE), 2: H5Tcopy(H5T_STD_I16LE), 4: H5Tcopy(H5T_STD_I32LE), 8: H5Tcopy(H5T_STD_I64LE)}
+cdef dict _int_be = {1: H5Tcopy(H5T_STD_I8BE), 2: H5Tcopy(H5T_STD_I16BE), 4: H5Tcopy(H5T_STD_I32BE), 8: H5Tcopy(H5T_STD_I64BE)}
+cdef dict _int_nt = {1: H5Tcopy(H5T_NATIVE_INT8), 2: H5Tcopy(H5T_NATIVE_INT16), 4: H5Tcopy(H5T_NATIVE_INT32), 8: H5Tcopy(H5T_NATIVE_INT64)}
 
-cdef dict _uint_le = {1: H5T_STD_U8LE, 2: H5T_STD_U16LE, 4: H5T_STD_U32LE, 8: H5T_STD_U64LE}
-cdef dict _uint_be = {1: H5T_STD_U8BE, 2: H5T_STD_U16BE, 4: H5T_STD_U32BE, 8: H5T_STD_U64BE}
-cdef dict _uint_nt = {1: H5T_NATIVE_UINT8, 2: H5T_NATIVE_UINT16, 4: H5T_NATIVE_UINT32, 8: H5T_NATIVE_UINT64} 
+cdef dict _uint_le = {1: H5Tcopy(H5T_STD_U8LE), 2: H5Tcopy(H5T_STD_U16LE), 4: H5Tcopy(H5T_STD_U32LE), 8: H5Tcopy(H5T_STD_U64LE)}
+cdef dict _uint_be = {1: H5Tcopy(H5T_STD_U8BE), 2: H5Tcopy(H5T_STD_U16BE), 4: H5Tcopy(H5T_STD_U32BE), 8: H5Tcopy(H5T_STD_U64BE)}
+cdef dict _uint_nt = {1: H5Tcopy(H5T_NATIVE_UINT8), 2: H5Tcopy(H5T_NATIVE_UINT16), 4: H5Tcopy(H5T_NATIVE_UINT32), 8: H5Tcopy(H5T_NATIVE_UINT64)} 
 
 cdef TypeFloatID _c_float(dtype dt):
     # Floats (single and double)
@@ -1514,7 +1516,7 @@ cpdef TypeID py_create(object dtype_in, bint logical=0):
                 elif vlen is unicode:
                     return _c_vlen_unicode()
                 elif vlen is not None:
-                    return vlen_create(py_create(vlen))
+                    return vlen_create(py_create(vlen, logical))
 
                 refclass = check_dtype(ref=dt)
                 if refclass is not None:
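
With member offsets carried through py_dtype(), padded compound types now
round-trip faithfully.  A sketch (file name is a placeholder):

    import numpy as np
    import h5py

    # Compound dtype with an explicit 4-byte pad between the members
    dt = np.dtype({'names': ['a', 'b'],
                   'formats': ['<f4', '<i4'],
                   'offsets': [0, 8]})

    with h5py.File('compound.h5', 'w') as f:
        dset = f.create_dataset('x', (4,), dtype=dt)
        print(dset.dtype.fields['b'][1])   # offset 8 survives the round trip
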
diff --git a/h5py/highlevel.py b/h5py/highlevel.py
index 2901c54..4f6c3cc 100644
--- a/h5py/highlevel.py
+++ b/h5py/highlevel.py
@@ -7,6 +7,15 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+# pylint: disable=unused-import
+
+"""
+    This is the deprecated legacy high-level interface.
+    
+    Everything here is canonically located at the root of the package.
+    New code should import directly from there, e.g. "from h5py import File".
+"""
+
 from __future__ import absolute_import
 
 from ._hl import filters
diff --git a/h5py/ipy_completer.py b/h5py/ipy_completer.py
index c9f34f3..fa098ef 100644
--- a/h5py/ipy_completer.py
+++ b/h5py/ipy_completer.py
@@ -11,34 +11,42 @@
 #
 #-
 
-"""
-h5py completer extension for ipython. This completer is automatically loaded
-when h5py is imported within ipython. It will let you do things like::
+# pylint: disable=eval-used,protected-access
 
-  f=File('foo.h5')
-  f['<tab>
-  # or:
-  f['ite<tab>
+"""
+    This is the h5py completer extension for ipython.  It is loaded by
+    calling the function h5py.enable_ipython_completer() from within an
+    interactive IPython session.
+    
+    It will let you do things like::
 
-which will do tab completion based on the subgroups of `f`. Also::
+      f=File('foo.h5')
+      f['<tab>
+      # or:
+      f['ite<tab>
 
-  f['item1'].at<tab>
+    which will do tab completion based on the subgroups of `f`. Also::
 
-will perform tab completion for the attributes in the usual way. This should
-also work::
+      f['item1'].at<tab>
 
-  a = b = f['item1'].attrs.<tab>
+    will perform tab completion for the attributes in the usual way. This should
+    also work::
 
-as should::
+      a = b = f['item1'].attrs.<tab>
 
-  f['item1/item2/it<tab>
+    as should::
 
+      f['item1/item2/it<tab>
 """
 
 from __future__ import absolute_import
 
 import posixpath
 import re
+import readline
+from ._hl.attrs import AttributeManager
+from ._hl.base import HLObject
+
 
 try:
     # >=ipython-1.0
@@ -53,15 +61,17 @@ except ImportError:
 try:
     # support >=ipython-0.11
     from IPython.utils import generics
-    from IPython import TryNext
 except ImportError:
     # support <ipython-0.11
     from IPython import generics
-    from IPython.ipapi import TryNext
-
-import readline
 
-from h5py.highlevel import AttributeManager, HLObject
+try:
+    from IPython.core.error import TryNext
+except ImportError:
+    try:
+        from IPython import TryNext
+    except ImportError:
+        from IPython.ipapi import TryNext
 
 re_attr_match = re.compile(r"(?:.*\=)?(.+\[.*\].*)\.(\w*)$")
 re_item_match = re.compile(r"""(?:.*\=)?(.*)\[(?P<s>['|"])(?!.*(?P=s))(.*)$""")
@@ -69,12 +79,13 @@ re_object_match = re.compile(r"(?:.*\=)?(.+?)(?:\[)")
 
 
 def _retrieve_obj(name, context):
+    """ Filter function for completion. """
+
     # we don't want to call any functions, but I couldn't find a robust regex
     # that filtered them without unintended side effects. So keys containing
     # "(" will not complete.
-    try:
-        assert '(' not in name
-    except AssertionError:
+    
+    if '(' in name:
         raise ValueError()
 
     try:
@@ -93,10 +104,10 @@ def h5py_item_completer(context, command):
 
     try:
         obj = _retrieve_obj(base, context)
-    except:
+    except Exception:
         return []
 
-    path, target = posixpath.split(item)
+    path, _ = posixpath.split(item)
     if path:
         items = (posixpath.join(path, name) for name in obj[path].iterkeys())
     else:
@@ -116,7 +127,7 @@ def h5py_attr_completer(context, command):
 
     try:
         obj = _retrieve_obj(base, context)
-    except:
+    except Exception:
         return []
 
     attrs = dir(obj)
@@ -154,6 +165,7 @@ def h5py_attr_completer(context, command):
 
 
 def h5py_completer(self, event):
+    """ Completer function to be loaded into IPython """
     base = re_object_match.split(event.line)[1]
 
     if not isinstance(self._ofind(base)['obj'], (AttributeManager, HLObject)):
@@ -173,6 +185,7 @@ def h5py_completer(self, event):
 
 
 def load_ipython_extension(ip=None):
+    """ Load completer function into IPython """
     if ip is None:
         ip = get_ipython()
     ip.set_hook('complete_command', h5py_completer, re_key=r"(?:.*\=)?(.+?)\[")
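
In practice the completer is activated from an interactive session:

    # inside IPython
    import h5py
    h5py.enable_ipython_completer()

    f = h5py.File('foo.h5', 'r')
    # typing f['<TAB> now completes group and dataset names
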
diff --git a/h5py/tests/hl/__init__.py b/h5py/tests/hl/__init__.py
index ac4c463..8712c6f 100644
--- a/h5py/tests/hl/__init__.py
+++ b/h5py/tests/hl/__init__.py
@@ -5,10 +5,14 @@ from . import  (test_dataset_getitem,
                 test_dataset_swmr,
                 test_dims_dimensionproxy,
                 test_file, 
-                test_attribute_create, )
+                test_attribute_create,
+                test_threads,
+                test_datatype, )
                 
 MODULES = ( test_dataset_getitem, 
             test_dataset_swmr, 
             test_dims_dimensionproxy,
             test_file,
-            test_attribute_create, )
+            test_attribute_create, 
+            test_threads,
+            test_datatype, )
diff --git a/h5py/tests/hl/test_dataset_getitem.py b/h5py/tests/hl/test_dataset_getitem.py
index a07f4c4..cf4b8e0 100644
--- a/h5py/tests/hl/test_dataset_getitem.py
+++ b/h5py/tests/hl/test_dataset_getitem.py
@@ -58,6 +58,14 @@ class TestEmpty(TestCase):
         dsid = h5py.h5d.create(self.f.id, b'x', tid, sid)
         self.dset = h5py.Dataset(dsid)
         
+    def test_ndim(self):
+        """ Verify number of dimensions """
+        self.assertEquals(self.dset.ndim, 0)
+        
+    def test_shape(self):
+        """ Verify shape """
+        self.assertEquals(self.dset.shape, tuple())
+        
     def test_ellipsis(self):
         """ Ellipsis -> IOError """
         with self.assertRaises(IOError):
@@ -101,6 +109,14 @@ class TestScalarFloat(TestCase):
         TestCase.setUp(self)
         self.data = np.array(42.5, dtype='f')
         self.dset = self.f.create_dataset('x', data=self.data)
+        
+    def test_ndim(self):
+        """ Verify number of dimensions """
+        self.assertEquals(self.dset.ndim, 0)
+        
+    def test_shape(self):
+        """ Verify shape """
+        self.assertEquals(self.dset.shape, tuple())
 
     def test_ellipsis(self):
         """ Ellipsis -> scalar ndarray """
@@ -147,6 +163,14 @@ class TestScalarCompound(TestCase):
         TestCase.setUp(self)
         self.data = np.array((42.5, -118, "Hello"), dtype=[('a', 'f'), ('b', 'i'), ('c', '|S10')])
         self.dset = self.f.create_dataset('x', data=self.data)
+        
+    def test_ndim(self):
+        """ Verify number of dimensions """
+        self.assertEquals(self.dset.ndim, 0)
+        
+    def test_shape(self):
+        """ Verify shape """
+        self.assertEquals(self.dset.shape, tuple())
 
     def test_ellipsis(self):
         """ Ellipsis -> scalar ndarray """
@@ -201,6 +225,16 @@ class TestScalarArray(TestCase):
         self.data = np.array([(3.2, -119), (42, 99.8), (3.14, 0)], dtype='f')
         self.dset = self.f.create_dataset('x', (), dtype=self.dt)
         self.dset[...] = self.data
+        
+    def test_ndim(self):
+        """ Verify number of dimensions """
+        self.assertEquals(self.data.ndim, 2)
+        self.assertEquals(self.dset.ndim, 0)
+        
+    def test_shape(self):
+        """ Verify shape """
+        self.assertEquals(self.data.shape, (3, 2))
+        self.assertEquals(self.dset.shape, tuple())
 
     def test_ellipsis(self):
         """ Ellipsis -> ndarray promoted to underlying shape """
@@ -246,6 +280,14 @@ class Test1DZeroFloat(TestCase):
         TestCase.setUp(self)
         self.data = np.ones((0,), dtype='f')
         self.dset = self.f.create_dataset('x', data=self.data)
+        
+    def test_ndim(self):
+        """ Verify number of dimensions """
+        self.assertEquals(self.dset.ndim, 1)
+        
+    def test_shape(self):
+        """ Verify shape """
+        self.assertEquals(self.dset.shape, (0,))
 
     def test_ellipsis(self):
         """ Ellipsis -> ndarray of matching shape """
@@ -290,6 +332,14 @@ class Test1DFloat(TestCase):
         TestCase.setUp(self)
         self.data = np.arange(13).astype('f')
         self.dset = self.f.create_dataset('x', data=self.data)
+        
+    def test_ndim(self):
+        """ Verify number of dimensions """
+        self.assertEquals(self.dset.ndim, 1)
+        
+    def test_shape(self):
+        """ Verify shape """
+        self.assertEquals(self.dset.shape, (13,))
 
     def test_ellipsis(self):
         self.assertNumpyBehavior(self.dset, self.data, np.s_[...])
@@ -398,6 +448,14 @@ class Test2DZeroFloat(TestCase):
         self.data = np.ones((0,3), dtype='f')
         self.dset = self.f.create_dataset('x', data=self.data)
         
+    def test_ndim(self):
+        """ Verify number of dimensions """
+        self.assertEquals(self.dset.ndim, 2)
+        
+    def test_shape(self):
+        """ Verify shape """
+        self.assertEquals(self.dset.shape, (0, 3))
+        
     @ut.expectedFailure
     def test_indexlist(self):
         """ see issue #473 """
diff --git a/h5py/tests/hl/test_datatype.py b/h5py/tests/hl/test_datatype.py
new file mode 100644
index 0000000..e7b5264
--- /dev/null
+++ b/h5py/tests/hl/test_datatype.py
@@ -0,0 +1,45 @@
+"""
+    Tests for the h5py.Datatype class.
+"""
+
+from __future__ import absolute_import
+
+import numpy as np
+import h5py
+
+from ..common import ut, TestCase
+
+class TestVlen(TestCase):
+
+    """
+        Check that storage of vlen strings is carried out correctly.
+    """
+    
+    def test_compound(self):
+
+        fields = []
+        fields.append(('field_1', h5py.special_dtype(vlen=str)))
+        fields.append(('field_2', np.int32))
+        dt = np.dtype(fields)
+        self.f['mytype'] = np.dtype(dt)
+        dt_out = self.f['mytype'].dtype.fields['field_1'][0]
+        self.assertEqual(h5py.check_dtype(vlen=dt_out), str)
+        
+    def test_vlen_enum(self):
+        fname = self.mktemp()
+        arr1 = [[1],[1,2]]
+        dt1 = h5py.special_dtype(vlen=h5py.special_dtype(
+            enum=('i', dict(foo=1, bar=2))))
+
+        with h5py.File(fname,'w') as f:
+            df1 = f.create_dataset('test', (len(arr1),), dtype=dt1)
+            df1[:] = np.array(arr1)
+
+        with h5py.File(fname,'r') as f:
+            df2  = f['test']
+            dt2  = df2.dtype
+            arr2 = [e.tolist() for e in df2[:]]
+
+        self.assertEqual(arr1, arr2)
+        self.assertEqual(h5py.check_dtype(enum=h5py.check_dtype(vlen=dt1)),
+                         h5py.check_dtype(enum=h5py.check_dtype(vlen=dt2)))
diff --git a/h5py/tests/hl/test_threads.py b/h5py/tests/hl/test_threads.py
new file mode 100644
index 0000000..966f32c
--- /dev/null
+++ b/h5py/tests/hl/test_threads.py
@@ -0,0 +1,61 @@
+# This file is part of h5py, a Python interface to the HDF5 library.
+#
+# http://www.h5py.org
+#
+# Copyright 2008-2013 Andrew Collette and contributors
+#
+# License:  Standard 3-clause BSD; see "license.txt" for full license terms
+#           and contributor agreement.
+
+"""
+    Tests behaviour of h5py in threads, e.g. suppression of error printing.
+"""
+
+from __future__ import absolute_import
+
+import threading
+import h5py
+
+from ..common import ut, TestCase
+
+
+class TestErrorPrinting(TestCase):
+
+    """
+        Verify the error printing is squashed in all threads.
+    """
+    
+    def test_printing(self):
+        """ No console messages should be shown from containership tests """
+        # Unfortunately we can't have this test assert anything, as
+        # HDF5 writes directly to stderr.  But it will show up in the
+        # console output.
+    
+        def test():
+            with h5py.File(self.mktemp(), 'w') as newfile:
+                try:
+                    doesnt_exist = newfile['doesnt_exist'].value
+                except KeyError:
+                    pass
+
+        th = threading.Thread(target=test)
+        th.start()
+        th.join()
+
+    def test_attr_printing(self):
+        """ No console messages should be shown for non-existing attributes """
+        
+        def test():
+        
+            with h5py.File(self.mktemp(), 'w') as newfile:
+                newfile['newdata'] = [1,2,3]
+                try:
+                    nonexistent_attr = newfile['newdata'].attrs['nonexistent_attr']
+                except KeyError:
+                    pass
+
+        th = threading.Thread(target=test)
+        th.start()
+        th.join()
diff --git a/h5py/tests/old/test_dataset.py b/h5py/tests/old/test_dataset.py
index 24e7182..26d6e05 100644
--- a/h5py/tests/old/test_dataset.py
+++ b/h5py/tests/old/test_dataset.py
@@ -772,6 +772,22 @@ class TestCompound(BaseDataset):
         self.assertTrue(np.all(outdata == testdata))
         self.assertEqual(outdata.dtype, testdata.dtype)
 
+    def test_assign(self):
+        dt = np.dtype( [ ('weight', (np.float64, 3)),
+                         ('endpoint_type', np.uint8), ] )
+
+        testdata = np.ndarray((16,), dtype=dt)
+        for key in dt.fields:
+            testdata[key] = np.random.random(size=testdata[key].shape)*100
+
+        ds = self.f.create_dataset('test', (16,), dtype=dt)
+        for key in dt.fields:
+            ds[key] = testdata[key]
+
+        outdata = self.f['test'][...]
+
+        self.assertTrue(np.all(outdata == testdata))
+        self.assertEqual(outdata.dtype, testdata.dtype)
 
 class TestEnum(BaseDataset):
 
diff --git a/h5py/version.py b/h5py/version.py
index 4d65d38..ead9609 100644
--- a/h5py/version.py
+++ b/h5py/version.py
@@ -7,6 +7,10 @@
 # License:  Standard 3-clause BSD; see "license.txt" for full license terms
 #           and contributor agreement.
 
+"""
+    Versioning module for h5py.
+"""
+
 from __future__ import absolute_import
 
 from . import h5 as _h5
@@ -14,7 +18,7 @@ from distutils.version import StrictVersion as _sv
 import sys
 import numpy
 
-version = "2.5.0"
+version = "2.6.0"
 
 _exp = _sv(version)
 
@@ -26,12 +30,6 @@ hdf5_version = "%d.%d.%d" % hdf5_version_tuple
 api_version_tuple = (1,8)
 api_version = "1.8"
 
-__doc__ = """\
-This is h5py **%s**
-
-* HDF5 version: **%s**
-""" % (version, hdf5_version)
-
 info = """\
 Summary of the h5py configuration
 ---------------------------------
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 0000000..045df2f
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,377 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=tests
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=numpy,h5py
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+#
+#      | Checkers                 | Broken import checks     | Other random garbage
+disable=format,design,similarities,cyclic-import,import-error,broad-except,no-self-use,no-name-in-module,invalid-name,abstract-method,star-args,import-self,no-init,locally-disabled,unidiomatic-typecheck
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#
+#     | Some format checks which are OK
+enable=bad-indentation,mixed-indentation,unnecessary-semicolon,superfluous-parens
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings; shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[LOGGING]
+
+# Logging modules to check that string format arguments are passed as
+# logging function parameters
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[SIMILARITIES]
+
+# Minimum number of lines for a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make this work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma-separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains the private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words in the private dictionary indicated by
+# the --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in a mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis).
+ignored-modules=
+
+# List of class names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint's inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused imports in __init__ files.
+init-import=no
+
+# A regular expression matching the names of dummy variables (i.e. variables
+# that are expected to be unused).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used, for
+# instance, to skip checking methods defined in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Defaults to
+# names with a leading underscore.
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield statements in a function / method body
+max-returns=6
+
+# Maximum number of branches in a function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=stringprep,optparse
+
+# Create a graph of all (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
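The evaluation expression above is plain Python arithmetic over pylint's per-category message counters. A minimal sketch of the same computation as a standalone function (the sample counts are invented purely for illustration):

    # Restatement of the pylintrc "evaluation" expression as a function.
    def pylint_score(error, warning, refactor, convention, statement):
        # Errors are weighted five times as heavily as the other message
        # categories; the penalty is the weighted count per statement, x10.
        return 10.0 - ((float(5 * error + warning + refactor + convention)
                        / statement) * 10)

    # Example: 2 errors, 10 warnings, 4 refactor and 4 convention messages
    # over 400 statements gives 10 - (28 / 400) * 10 = 9.3
    print(pylint_score(2, 10, 4, 4, 400))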
diff --git a/setup.py b/setup.py
index 1108fa8..5ed287d 100755
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,17 @@ import os.path as op
 import setup_build, setup_configure
 
 
-VERSION = '2.5.0'
+VERSION = '2.6.0'
+
+NUMPY_DEP = 'numpy>=1.6.1'
+
+# these are required to use h5py
+RUN_REQUIRES = [NUMPY_DEP, 'six']
+
+# these are required to build h5py
+# RUN_REQUIRES is included because "setup.py test" needs the runtime
+# dependencies; RUN_REQUIRES can be removed once "setup.py test" is removed
+SETUP_REQUIRES = RUN_REQUIRES + [NUMPY_DEP, 'Cython>=0.19', 'pkgconfig']
 
 
 # --- Custom Distutils commands -----------------------------------------------
@@ -134,8 +144,7 @@ setup(
   packages = ['h5py', 'h5py._hl', 'h5py.tests', 'h5py.tests.old', 'h5py.tests.hl'],
   package_data = package_data,
   ext_modules = [Extension('h5py.x',['x.c'])],  # To trick build into running build_ext
-  requires = ['numpy (>=1.6.1)', 'Cython (>=0.17)'],
-  install_requires = ['numpy>=1.6.1', 'Cython>=0.17', 'six'],
-  setup_requires = ['pkgconfig', 'six'],
+  install_requires = RUN_REQUIRES,
+  setup_requires = SETUP_REQUIRES,
   cmdclass = CMDCLASS,
 )
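The split above separates runtime dependencies from build dependencies: install_requires is what gets recorded for users installing h5py, while setup_requires is what setuptools fetches so the build itself (Cython compilation, pkgconfig lookups) can run. A minimal sketch of the wiring, for illustration only (the real setup() call in setup.py passes many more arguments):

    # Sketch of the dependency wiring introduced in this hunk.
    from setuptools import setup

    NUMPY_DEP = 'numpy>=1.6.1'
    RUN_REQUIRES = [NUMPY_DEP, 'six']   # needed to use h5py
    SETUP_REQUIRES = RUN_REQUIRES + [NUMPY_DEP, 'Cython>=0.19', 'pkgconfig']

    setup(
        name='h5py',
        version='2.6.0',
        install_requires=RUN_REQUIRES,   # recorded as runtime dependencies
        setup_requires=SETUP_REQUIRES,   # available while setup.py runs
    )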
diff --git a/setup_build.py b/setup_build.py
index bc3a8b0..6e3468b 100644
--- a/setup_build.py
+++ b/setup_build.py
@@ -33,7 +33,7 @@ MODULES =  ['defs','_errors','_objects','_proxy', 'h5fd', 'h5z',
 
 EXTRA_SRC = {'h5z': [ localpath("lzf/lzf_filter.c"),
               localpath("lzf/lzf/lzf_c.c"),
-              localpath("lzf/lzf/lzf_d.c")]} 
+              localpath("lzf/lzf/lzf_d.c")]}
 
 
 if sys.platform.startswith('win'):
@@ -42,7 +42,7 @@ if sys.platform.startswith('win'):
         'include_dirs'  : [localpath('lzf'), localpath('windows')],
         'library_dirs'  : [],
         'define_macros' : [('H5_USE_16_API', None), ('_HDF5USEDLL_', None)] }
-        
+
 else:
     COMPILER_SETTINGS = {
        'libraries'      : ['hdf5', 'hdf5_hl'],
@@ -56,16 +56,16 @@ class h5py_build_ext(build_ext):
     """
         Custom distutils command which encapsulates api_gen pre-building,
         Cython building, and C compilation.
-        
+
         Also handles making the Extension modules, since we can't rely on
         NumPy being present in the main body of the setup script.
     """
-    
+
     @staticmethod
     def _make_extensions(config):
         """ Produce a list of Extension instances which can be passed to
         cythonize().
-        
+
         This is the point at which custom directories, MPI options, etc.
         enter the build process.
         """
@@ -83,11 +83,18 @@ class h5py_build_ext(build_ext):
         except EnvironmentError:
             pass
 
-        settings['include_dirs'] += [numpy.get_include()]
+        try:
+            numpy_includes = numpy.get_include()
+        except AttributeError:
+            # if numpy is not fully installed, get the headers from the .egg directory
+            import numpy.core
+            numpy_includes = os.path.join(os.path.dirname(numpy.core.__file__), 'include')
+
+        settings['include_dirs'] += [numpy_includes]
         if config.mpi:
             import mpi4py
             settings['include_dirs'] += [mpi4py.get_include()]
-            
+
         # Ensure a custom location appears first, so we don't get a copy of
         # HDF5 from some default location in COMPILER_SETTINGS
         if config.hdf5 is not None:
@@ -97,14 +104,14 @@ class h5py_build_ext(build_ext):
         # TODO: should this only be done on UNIX?
         if os.name != 'nt':
             settings['runtime_library_dirs'] = settings['library_dirs']
-        
+
         def make_extension(module):
             sources = [localpath('h5py', module+'.pyx')] + EXTRA_SRC.get(module, [])
             return Extension('h5py.'+module, sources, **settings)
 
         return [make_extension(m) for m in MODULES]
-        
-        
+
+
     @staticmethod
     def run_system_cython(pyx_files):
         try:
@@ -136,7 +143,7 @@ class h5py_build_ext(build_ext):
 
     def run(self):
         """ Distutils calls this method to run the command """
-        
+
         from Cython.Build import cythonize
 
         # Provides all of our build options
@@ -146,35 +153,44 @@ class h5py_build_ext(build_ext):
         defs_file = localpath('h5py', 'defs.pyx')
         func_file = localpath('h5py', 'api_functions.txt')
         config_file = localpath('h5py', 'config.pxi')
-             
+
         # Rebuild low-level defs if missing or stale
         if not op.isfile(defs_file) or os.stat(func_file).st_mtime > os.stat(defs_file).st_mtime:
             print("Executing api_gen rebuild of defs")
             api_gen.run()
-            
+
         # Rewrite config.pxi file if needed
         if not op.isfile(config_file) or config.rebuild_required:
             with open(config_file, 'wb') as f:
+                if config.mpi:
+                    import mpi4py
+                    from distutils.version import StrictVersion
+                    v2 = StrictVersion(mpi4py.__version__) > StrictVersion("1.3.1")
+                else:
+                    v2 = False
                 s = """\
 # This file is automatically generated by the h5py setup script.  Don't modify.
 
 DEF MPI = %(mpi)s
+DEF MPI4PY_V2 = %(mpi4py_v2)s
 DEF HDF5_VERSION = %(version)s
 DEF SWMR_MIN_HDF5_VERSION = (1,9,178)
+DEF VDS_MIN_HDF5_VERSION = (1,9,233)
 """
                 s %= {'mpi': bool(config.mpi),
+                      'mpi4py_v2': bool(v2),
                       'version': tuple(int(x) for x in config.hdf5_version.split('.'))}
                 s = s.encode('utf-8')
                 f.write(s)
-        
+
         # Run Cython
         print("Executing cythonize()")
         self.extensions = cythonize(self._make_extensions(config),
                             force=config.rebuild_required or self.force)
         self.check_rerun_cythonize()
-        
+
         # Perform the build
         build_ext.run(self)
-        
+
         # Mark the configuration as built
         config.reset_rebuild()
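Two changes in this file are worth unpacking. First, the new try/except around numpy.get_include() handles the case where numpy was pulled in as an .egg by setup_requires and the top-level get_include() helper is not yet available; the headers are then located relative to numpy.core. Second, the generated config.pxi gains an MPI4PY_V2 compile-time flag, derived by comparing mpi4py.__version__ against 1.3.1 with distutils' StrictVersion. A standalone sketch of the header-lookup fallback:

    # Standalone restatement of the numpy header-lookup fallback above.
    import os

    def numpy_include_dir():
        import numpy
        try:
            # Normal case: a fully installed numpy exposes get_include().
            return numpy.get_include()
        except AttributeError:
            # Fallback: locate the bundled headers relative to numpy.core,
            # which also works for .egg installs made by setup_requires.
            import numpy.core
            return os.path.join(os.path.dirname(numpy.core.__file__),
                                'include')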
diff --git a/tox.ini b/tox.ini
index 9af6228..f565c8b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,12 +1,16 @@
 [tox]
-envlist = py26,py27,py32,py33,py34
+envlist = {py26,py27,py32,py33,py34}-{test,pylint}-{nodeps,deps}
+
 [testenv]
 deps =
-    numpy>=1.0.1
-    Cython>=0.13
+    deps: numpy>=1.6.1
+    deps: cython>=0.19
+    pylint: astroid>=1.3,<1.4
+    pylint: pylint>=1.4,<1.5
+    py26: unittest2
 commands =
-    python setup.py test
-[testenv:py26]
-deps =
-    unittest2
-    {[testenv]deps}
+    test: python -c "from sys import exit; import h5py; exit(0) if h5py.run_tests().wasSuccessful() else exit(1)"
+    pylint: pylint h5py
+changedir =
+    test: {toxworkdir}
+    pylint: {toxinidir}
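The new envlist uses tox factor syntax: each generated environment name, e.g. py27-test-deps, is a combination of factors, and the "deps:", "pylint:", "py26:" and "test:" prefixes apply a dependency or command only to environments carrying that factor. The quoted test one-liner, expanded into a readable script, is equivalent to the following sketch (assuming, as the command itself does, that h5py.run_tests() returns a unittest-style result object):

    # Readable expansion of the tox "test" one-liner: run the bundled test
    # suite and turn the unittest result into a process exit code.
    import sys
    import h5py

    result = h5py.run_tests()
    sys.exit(0 if result.wasSuccessful() else 1)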

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/h5py.git



More information about the debian-science-commits mailing list