Import python-fitsio_1.1.4+dfsg.orig.tar.xz
author     Ole Streicher <olebole@debian.org>
           Wed, 24 Feb 2021 09:09:50 +0000 (10:09 +0100)
committer  Ole Streicher <olebole@debian.org>
           Wed, 24 Feb 2021 09:09:50 +0000 (10:09 +0100)
[dgit import orig python-fitsio_1.1.4+dfsg.orig.tar.xz]

33 files changed:
.gitignore [new file with mode: 0644]
.travis.yml [new file with mode: 0644]
CHANGES.md [new file with mode: 0644]
LICENSE.txt [new file with mode: 0644]
MANIFEST.in [new file with mode: 0644]
PKG-INFO [new file with mode: 0644]
README.md [new file with mode: 0644]
fitsio.egg-info/PKG-INFO [new file with mode: 0644]
fitsio.egg-info/SOURCES.txt [new file with mode: 0644]
fitsio.egg-info/dependency_links.txt [new file with mode: 0644]
fitsio.egg-info/requires.txt [new file with mode: 0644]
fitsio.egg-info/top_level.txt [new file with mode: 0644]
fitsio/__init__.py [new file with mode: 0644]
fitsio/fitsio_pywrap.c [new file with mode: 0644]
fitsio/fitslib.py [new file with mode: 0644]
fitsio/hdu/__init__.py [new file with mode: 0644]
fitsio/hdu/base.py [new file with mode: 0644]
fitsio/hdu/image.py [new file with mode: 0644]
fitsio/hdu/table.py [new file with mode: 0644]
fitsio/header.py [new file with mode: 0644]
fitsio/test.py [new file with mode: 0644]
fitsio/test_images/test_gzip_compressed_image.fits.fz [new file with mode: 0644]
fitsio/util.py [new file with mode: 0644]
patches/README.md [new file with mode: 0644]
patches/build_cfitsio_patches.py [new file with mode: 0644]
patches/configure.in.patch [new file with mode: 0644]
patches/configure.patch [new file with mode: 0644]
patches/drvrnet.c.patch [new file with mode: 0644]
patches/fitscore.c.patch [new file with mode: 0644]
patches/fitsio.h.patch [new file with mode: 0644]
patches/putcols.c.patch [new file with mode: 0644]
setup.cfg [new file with mode: 0644]
setup.py [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..510c73d
--- /dev/null
@@ -0,0 +1,114 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
diff --git a/.travis.yml b/.travis.yml
new file mode 100644 (file)
index 0000000..92e7c20
--- /dev/null
@@ -0,0 +1,63 @@
+language: c
+os: linux
+arch: ppc64le
+
+# use the containers for a faster build
+sudo: false
+
+env:
+    - PYTHON_VERSION=3.7 NUMPY_VERSION=1.16
+
+before_install:
+    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
+        wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh;
+      else
+        if [[ "$TRAVIS_CPU_ARCH" == "ppc64le" ]]; then
+          wget  https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-ppc64le.sh -O miniconda.sh;
+        else
+          wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
+        fi
+      fi
+    - chmod +x miniconda.sh
+    - ./miniconda.sh -b -p $HOME/miniconda
+    - export PATH=$HOME/miniconda/bin:$PATH
+    - conda update --yes conda
+    - wget https://heasarc.gsfc.nasa.gov/FTP/software/fitsio/c/cfitsio-3.49.tar.gz
+    - tar -xzvf cfitsio-3.49.tar.gz
+    - (cd cfitsio-3.49; ./configure --disable-shared --prefix=$HOME/cfitsio-static-install; make install -j 4)
+
+install:
+    - conda create --yes -n test python=$PYTHON_VERSION
+    - source activate test
+    - conda install --yes numpy=$NUMPY_VERSION nose cython
+    - if [ $PYTHON_VERSION == 2.6 ]; then conda install --yes argparse; fi
+
+script:
+    - FITSIO_INSTALL_DIR=$HOME/fitsio-temp-install
+    - export PYTHONPATH=$FITSIO_INSTALL_DIR/lib/python$PYTHON_VERSION/site-packages:$PYTHONPATH
+    - mkdir -p $FITSIO_INSTALL_DIR/lib/python$PYTHON_VERSION/site-packages
+
+    - export INSTALLFLAGS="--prefix=$FITSIO_INSTALL_DIR --single-version-externally-managed --record rec.txt"
+    - python setup.py install $INSTALLFLAGS
+
+    - pushd /tmp
+    - SKIP_BZIP_TEST=true python -c "import fitsio; fitsio.test.test()"
+    - popd
+    - rm -rf $FITSIO_INSTALL_DIR
+
+    - python setup.py clean -a
+    - mkdir -p $FITSIO_INSTALL_DIR/lib/python$PYTHON_VERSION/site-packages
+    - python setup.py install $INSTALLFLAGS build_ext --use-system-fitsio --system-fitsio-includedir=$HOME/cfitsio-static-install/include --system-fitsio-libdir=$HOME/cfitsio-static-install/lib
+
+    - pushd /tmp
+    - SKIP_BZIP_TEST=true SKIP_HCOMPRESS_U2_TEST=true python -c "import fitsio; fitsio.test.test()"
+    - popd
+    - rm -rf $FITSIO_INSTALL_DIR
+
+    - python setup.py clean -a
+    - python setup.py build
+    - python setup.py clean -a
+    - python setup.py build_ext
+
+#notifications:
+#  email: false
diff --git a/CHANGES.md b/CHANGES.md
new file mode 100644 (file)
index 0000000..2e6e857
--- /dev/null
@@ -0,0 +1,646 @@
+version 1.1.4
+---------------------------------
+
+New Features
+
+    - Moved most testing to GitHub actions (linux, osx).
+    - Added testing on ppc64le w/ TravisCI (thanks @asellappen)
+
+Bug Fixes
+
+    - Don't remove BLANK keywords in header clean
+    - Preserve order of comments in header
+
+Compatibility changes
+
+    - moved to using `bool` rather than `np.bool` to be compatible
+      with numpy 1.20
+
+version 1.1.3
+---------------------------------
+
+This release moves to cfitsio 3.49, which has bug fixes and now properly
+supports reading certain classes of lossless compressed files
+
+New Features
+
+    - Added keywords to control compression
+        - qlevel controls the quantization level
+        - qmethod sets the quantization method
+        - hcomp_scale, hcomp_smooth set HCOMPRESS-specific options
+
+        A nice result of this is that one can do lossless gzip compression
+        by setting qlevel=0 (see the sketch below)
+    - Work around some types of garbage characters that might appear
+      in headers
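+
+    A minimal sketch of lossless compression using the keywords above;
+    the filename and data are illustrative:
+
+        import numpy as np
+        import fitsio
+
+        img = np.random.normal(size=(32, 32))
+
+        # qlevel=0 requests lossless compression per this entry
+        fitsio.write('test_gzip.fits', img, compress='gzip', qlevel=0,
+                     clobber=True)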
+
+BACKWARDS INCOMPATIBLE CHANGES
+
+    - non-ascii junk in headers is replaced by ascii characters to
+      avoid segmentation faults in the python standard library
+      when non-unicode characters are detected.  This will cause
+      codes that check for consistency between copied headers
+      to fail, since the header data is modified.
+
+Bug Fixes
+
+    - Write integer keywords using the long long support rather than long
+    - Fix bug where a new file is started and the user can access a
+      fictional HDU, causing bookkeeping problems
+    - Return zero length result when requested rows have
+      zero length (rainwoodman)
+
+version 1.1.2
+---------------------------------
+
+Bug Fixes
+
+    - Fixed deprecation warnings for extra keyword arguments.
+    - Fixed SyntaxWarning: "is" with a literal (Michka Popoff)
+
+version 1.1.1
+---------------------------------
+
+Bug Fixes
+
+    - Fix bug in drvrnet.c in printf statement, causing compile
+      issues on some systems.
+
+version 1.1.0
+---------------------------------
+
+Bumping the minor version due to the update of the cfitsio version
+
+This reverts to the behavior that compression settings are set as a toggle,
+which is the cfitsio convention.  The user needs to turn compression on and off
+selectively.  The alternative behavior, introduced in 1.0.1, broke the mode
+where compression is set in the filename, as well as breaking with convention.
+
+New Features
+
+    - Updated to cfitsio version 3.470 (#261)
+    - Add ability to stride (step value) when slicing (Dustin Jenkins)
+    - Add feature to flip along axis when slicing (Dustin Jenkins)
+    - Feature to ignore image scaling (Dustin Jenkins)
+
+Bug Fixes
+
+    - Fix error reading with an empty rows argument (rainwoodman)
+    - Fix bug when reading slice with step, but no start/stop (Mike Jarvis)
+    - Fix bug with clobber when compression is sent in filename
+
+
+Deprecations
+
+    - Removed the use of `**kwargs` in various read/write routines. This
+      pattern was causing silent bugs. All functions now use explicit
+      keyword arguments. A warning will be raised if any keyword arguments
+      are passed. In version `1.2`, this warning will become an error.
+
+version 1.0.5
+---------------------------------
+
+Bug Fixes
+
+    - fixed bug getting `None` keywords
+    - fixed bug writing 64 bit images (#256, #257)
+    - fixed HISTORY card value not being read
+
+version 1.0.4
+---------------------------------
+
+New Features
+
+    - support for empty keywords in header, which are supported
+      by the standard and are used for cosmetic comments
+
+Bug Fixes
+
+    - Fix for inserting bit columns and appending data with bitcols
+    - deal with non-standard header values such as NAN
+      by returning them as strings
+    - fixed many bugs reading headers; these were a casualty of
+      the header reading optimizations put in for 1.0.1
+
+version 1.0.3
+---------------------------------
+
+This is a bug fix release
+
+Bug Fixes
+
+    - The new header reading code did not deal properly with some
+      HIERARCH non-standard header key values.
+
+version 1.0.2
+---------------------------------
+
+This is a bug fix release
+
+Bug Fixes
+
+    - the read_header function was not treating the case_sensitive
+      keyword properly (Stephen Bailey)
+
+version 1.0.1
+---------------------------------
+
+Backwards Incompatible Changes
+
+    - Support for python 3 strings.
+    - Support for proper string null termination.  This means you can read back exactly
+      what you wrote.  However this departs from previous fitsio which used
+      the non-standard cfitsio convention of padding strings with spaces.
+    - Scalar indexing of FITS objects now returns a scalar, consistent
+      with numpy indexing rules (rainwoodman)
+
+New Features
+
+    - Installation moved to setuptools from distutils.
+    - Bundling of cfitsio now done with patches against the upstream
+      version instead of direct edits to the upstream code.
+    - Speed improvements for the read_header convenience function, and
+      reading of headers in general.
+
+Bug Fixes
+
+    - CONTINUE in headers are now properly read.  Note there is a corner
+      case that is mis-handled by the underlying cfitsio library.  A bug
+      report has been sent.  (thanks to Alex Drlica-Wagner for help
+      identifying and testing this issue)
+    - Fixed bug where some long strings were not properly written to headers
+    - Fixed bug where compression settings for an open FITS object were
+      inherited from the previous HDU by a new HDU
+    - Fixed bug where comment strings were lost when setting the value in
+      a FITSHDR entry
+    - Fixed bug where get_comment was raising ValueError rather than KeyError
+    - For py3 need to ensure by hand that string sizes are greater than 0
+
+Deprecations
+
+    - removed `convert` keyword in `FITSRecord` and `FITSHDR` classes.
+
+version 0.9.12
+---------------------------------
+
+New Features
+
+    - Deal properly with undefined value header entries
+    - can delete rows from a table
+    - can insert rows with resize()
+    - can create empty HDU extension for extensions beyond 0 (Felipe Menanteau)
+    - sanitize string input for py3
+    - GZIP_2 compression support (Felipe Menanteau)
+    - Improvements to python packaging for easier installation.
+    - Using cfitsio 3.430 now with patches for known bugs
+    - Now support reading and writing bit columns (Eli Rykoff)
+    - Can now read CONTINUE keywords in headers.  It is currently
+      treated as a comment; full implementation to come. (Alex Drlica-Wagner)
+    - Can now use a standard key dict when writing a header key using
+      the write_key method via `**`, e.g. `write_key(**key_dict)`
+      (Alex Drlica-Wagner); see the sketch after this list
+    - Delete row sets and row ranges using the delete_rows() method
+      for tables
+    - Resize tables, adding or removing rows, using the resize() method for
+      tables
+    - make write_key usable with standard dictionary using the `**keydict`
+      style
+    - allow writing empty HDUs after the first one by sending
+        ignore_empty=True to the FITS constructor or
+        the write convenience function (Felipe Menanteau).
+        We might make this the default in the future if
+        it is found to be benign
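+
+    A minimal sketch of the `write_key(**key_dict)` style above; the key
+    name and values are illustrative:
+
+        import numpy as np
+        import fitsio
+
+        with fitsio.FITS('test.fits', 'rw', clobber=True) as fits:
+            fits.write(np.zeros((2, 2)))
+            # a standard key dict expanded via **
+            key_dict = {'name': 'MYKEY', 'value': 3.5, 'comment': 'an example'}
+            fits[-1].write_key(**key_dict)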
+
+Bug Fixes
+
+    - Only raise exception when PLIO u4/u8 is selected now that u1/u2 is supported
+      in cfitsio (Eli Rykoff)
+    - link curl library if cfitsio linked to it
+    - don't require numpy to run setup (Simon Conseil)
+    - strings with underscores in headers, such as `1_000_000`, are no
+      longer converted to numbers in py3
+    - check that the input fields names for tables are unique after converting
+      to upper case
+    - link against libm explicitly for compatibility on some systems
+
+
+version 0.9.11
+---------------------------------
+
+New Features
+
+    - Added trim_strings option to constructor and as keyword for read methods.
+      If trim_strings=True is set, white space is trimmed from the end
+      of all string columns upon reading; see the sketch after this list.
+      This was introduced because cfitsio internally pads strings out with
+      spaces to the full column width when writing, against the FITS standard.
+
+    - Added read_raw() method to the FITS class, to read the raw underlying data
+      from the file (Dustin Lang)
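+
+    A minimal sketch of the trim_strings option (filename illustrative):
+
+        import fitsio
+
+        # set on the FITS object so all reads trim trailing spaces
+        fits = fitsio.FITS('data.fits', trim_strings=True)
+        data = fits[1].read()
+
+        # or per read call
+        data = fitsio.read('data.fits', trim_strings=True)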
+
+Bug Fixes
+
+    - Fix bug reading hierarch keywords. recent changes to keyword parsing had
+      broken reading of hierarch keywords
+    - Fix for strings that look like expressions, e.g. '3-4' which were
+      being evaluated rather than returned as strings.
+    - Fix bug for missing key in FITSHDR object using the hdr[key]
+      notation.  Also raise KeyError rather than ValueError
+
+version 0.9.10
+---------------
+
+Bug Fixes
+
+    - Fix variable length string column copying in python 3
+    - Fix bug checking for max size in a variable length table column.
+    - Raise an exception when writing to a table with data
+      that has shape ()
+    - exit test suite with non-zero exit code if a test fails
+
+Continuous integration
+
+    - the travis ci now runs unit tests, ignoring those that may fail
+      when certain libraries/headers are not installed on the user's system (for
+      now this is only bzip2 support)
+    - only particular pairs of python version/numpy version are tested
+
+python3 compatibility
+
+    - the compatibility is now built into the code rather than
+      using 2to3 to modify code at install time.
+
+Workarounds
+
+    - It turns out that when python, numpy etc. are compiled with gcc 4*
+      and fitsio is compiled with gcc 5*, there is a problem, in some cases,
+      reading from an array with non-aligned memory.  This has to do with using
+      the -O3 optimization flag when compiling cfitsio.  Replacing -O3 with
+      -O2 fixes the issue.  This was an issue on linux in both anaconda python2
+      and python3.
+
+
+version 0.9.9.1
+----------------------------------
+
+New tag so that pypi will accept the updated version
+
+version 0.9.9
+----------------------------------
+
+New Features
+
+    - header_start, data_start, data_end now available in the
+      info dictionary, as well as the new get_offsets() method
+      to access these in a new dict.
+      (thanks Dimitri Muna for the initial version of this)
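+
+    A minimal sketch using the new offset information (filename
+    illustrative):
+
+        import fitsio
+
+        fits = fitsio.FITS('data.fits')
+        offsets = fits[0].get_offsets()
+        print(offsets['header_start'],
+              offsets['data_start'],
+              offsets['data_end'])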
+
+Bug Fixes
+
+    - Fix bug when writing new COMMENT fields (thanks Alex Drlica-Wagner for
+      initial fix)
+    - deal correctly with aligned data in some scenarios
+      (thanks Ole Streicher)
+    - use correct data type long for tile_dims_fits in
+      the set_compression C code.  This avoids a crash
+      on 32 bit systems. (thanks Ole Streicher)
+    - use correct data type npy_int64 for pointer in
+      get_long_slices (this function is not correctly
+      named).  Avoids crash on some 32 bit systems.
+      (thanks Ole Streicher)
+    - use correct data type npy_int64 for pointer in
+      PyFITSObject_create_image_hdu, rather than npy_intp.
+      (thanks Ole Streicher)
+
+version 0.9.8
+----------------------------------
+
+New Features
+
+    - added read_scamp_head function to read the .head files output
+        by SCAMP and return a FITSHDR object
+    - reserved header space when creating image and table extensions
+        and a header is being written.  This can improve performance
+        substantially, especially on distributed file systems.
+    - When possible write image data at HDU creation.  This can
+        be a big performance improvement, especially on distributed file
+        systems.
+    - Support for reading bzipped FITS files.  (Dustin Lang)
+
+    - Added option to use the system CFITSIO instead of the bundled one,
+        by sending --use-system-fitsio. We strongly recommend using only a
+        cfitsio at least as new as the bundled one.  Also note the bundled
+        cfitsio sometimes contains patches that are not yet upstream in an
+        official cfitsio release
+    - proper support for reading unsigned images compressed with PLIO.
+        This is a patch directly on the cfitsio code base.  The same
+        code is in the upstream, but not yet released.
+    - New method reshape(dims) for images
+    - When writing into an existing image HDU, and larger dimensions
+        are required, the image is automatically expanded; see the
+        sketch after this list.
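+
+    A minimal sketch of reshape() and automatic expansion (filename and
+    sizes illustrative):
+
+        import numpy as np
+        import fitsio
+
+        with fitsio.FITS('images.fits', 'rw', clobber=True) as fits:
+            fits.write(np.zeros((10, 10), dtype='f4'))
+            # reshape the image HDU in place
+            fits[-1].reshape([20, 20])
+            # writing larger data expands the image automatically
+            fits[-1].write(np.ones((30, 30), dtype='f4'))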
+
+Bug Fixes
+
+    - Fixed broken boolean fields in new versions of numpy (rainwoodman)
+    - Fixed bug when image was None (for creating empty first HDU)
+    - Removed -iarch in setup.py for mac OS X.  This should
+        work for versions Mavericks and Snow Leopard (Christopher Bonnett)
+    - Reading a single string column was failing in some cases, this
+        has been fixed
+    - When creating a TableColumnSubset using [cols], the existence
+        of the columns is checked immediately, rather than waiting for the
+        check in the read()
+    - make sure to convert correct endianness when writing during image HDU
+        creation
+    - Corrected the repr for single column subsets
+    - only clean bzero,bscale,bunit from headers for TableHDU
+
+Dev features
+
+    - added travis ci
+
+version 0.9.7
+----------------------------------
+
+New Features
+
+    - python 3 compatibility
+    - Adding a new HDU is now near constant time
+    - Can now create an empty image extension using create_image_hdu
+        and sending the dims= and dtype= keywords
+    - Can now write into a sub-section of an existing image using the
+        start= keyword; see the sketch after this list.
+    - Can now use a scalar slice for reading images, e.g.
+        hdu[row1:row2, col]
+      although this still currently retains the extra dimension
+    - Use warnings instead of printing to stdout
+    - IOError is now used to indicate a number of errors that
+        were previously ValueError
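+
+    A minimal sketch of creating an empty image HDU and writing a
+    sub-section, per the entries above (names and sizes illustrative):
+
+        import numpy as np
+        import fitsio
+
+        with fitsio.FITS('new.fits', 'rw', clobber=True) as fits:
+            # empty image extension with the given dims and dtype
+            fits.create_image_hdu(dims=[100, 100], dtype='f4')
+            # write a 5x5 block starting at pixel (10, 10)
+            sub = np.arange(25, dtype='f4').reshape(5, 5)
+            fits[-1].write(sub, start=[10, 10])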
+
+
+version 0.9.6
+--------------
+
+New Features
+
+    - use cfitsio 3370 to support new tile compression features
+    - FITSRecord class to encapsulate all the ways one can represent header
+      records.  This is now used internally in the FITSHDR class instead of raw
+      dicts, but as FITSRecord inherits from dict this should be transparent.
+    - FITSCard class; inherits from FITSRecord and is a special case for header
+      card strings
+    - One can directly add a fits header card string to the FITSHDR object
+      using add_record
+
+Bug Fixes
+
+    - use literal_eval instead of eval for evaluating header values (D. Lang)
+    - If input to write_keys is a FITSHDR, just use it instead of creating a
+      new FITSHDR object. (D. Lang)
+    - update existing keys when adding records to FITSHDR, except for
+      comment and history fields.
+    - fixed bug with empty string in header card
+    - deal with cfitsio treating first 4 comments specially
+
+version 0.9.5
+--------------------------------
+
+Note the version 0.9.4 was skipped because some people had been using the
+master branch in production, which had version 0.9.4 set.  This will allow
+automatic version detection to work.  In the future master will not have
+the next version set until release.
+
+New Features
+
+    - Re-factored code to use sub-classes for each HDU type.  These are called
+      ImageHDU, TableHDU, and AsciiTableHDU.
+    - Write and read 32-bit and 64-bit complex table columns
+    - Write and read boolean table columns (contributed by Dustin Lang)
+    - Specify tile dimensions for compressed images.
+    - write_comment and write_history methods added.
+    - is_compressed() for image HDUs, True if tile compressed.
+    - added `**keys` to the image hdu reading routines to provide a more uniform
+      interface for all hdu types
+
+Bug Fixes
+
+    - Correct appending to COMMENT and HISTORY fields when writing a full
+      header object.
+    - Correct conversion of boolean keywords, writing and reading.
+    - Strip out compression related reserved keywords when writing a
+      user-provided header.
+    - Simplified reading string columns in ascii tables so that certain
+      incorrectly formatted tables from CASUTools are now read accurately.
+      The change was minimal and did not affect reading well formatted tables,
+      so seemed worth it.
+    - Support non-standard TSHORT and TFLOAT columns in ascii tables as
+      generated by CASUTools.  They are non-standard but supporting them
+      does not seem to break anything (pulled from Simon Walker).
+
+All changes E. Sheldon except where noted.
+
+version 0.9.3
+--------------------------
+New Features
+
+    - Can write lists of arrays and dictionaries of arrays
+      to fits tables.
+    - Added iteration over HDUs in FITS class
+    - Added iteration to the FITSHDU object
+    - Added iteration to the FITSHDR header object
+    - added checking that an hdu exists in the file, either
+        by extension number or name, using the "in" syntax.  e.g.
+            fits=fitsio.FITS(filename)
+            if 'name' in fits:
+                data=fits['name'].read()
+    - added `**keys` to the read_header function
+    - added get_exttype() to the FITSHDU class, returning one of
+        'BINARY_TBL', 'ASCII_TBL', 'IMAGE_HDU'
+    - added get_nrows() for binary tables
+    - added get_colnames()
+    - added get_filename()
+    - added get_info()
+    - added get_nrows()
+    - added get_vstorage()
+    - added is_compressed()
+    - added get_ext()
+
+minor changes
+
+    - raise error on malformed TDIM
+
+Backwards incompatible changes
+
+    - renamed some attributes; use the getters instead
+        - `colnames` -> `_colnames`
+        - `info` -> `_info`
+        - `filename` -> `_filename`
+        - `ext` -> `_ext`
+        - `vstorage` -> `_vstorage`
+        - `is_compressed` -> `_is_compressed`
+            (use the getter)
+
+Bug Fixes
+
+    - newer numpys (1.6.2) were barfing when adding a python float to u4 arrays.
+    - Give a more clear error message for malformed TDIM header keywords
+    - fixed bug displaying column info for string array columns in tables
+    - got cfitsio patch to deal with very large compressed images, which were
+      not read properly.  This is now in the latest cfitsio.
+    - implemented workaround for bug where numpy declares 'i8' arrays as type
+      npy_longlong, which is not correct.
+    - fixed bug in order of iteration of HDUs
+
+version 0.9.2
+--------------------------
+
+New Features
+
+    - Much faster writing to tables when there are many columns.
+    - Header object now has a setitem feature
+        h['item'] = value
+    - Header stores values now instead of the string rep
+    - You can force names of fields read from tables to upper
+      or lower case, either during construction of the FITS object
+      using or at read time using the lower= and upper= keywords.
+
+bug fixes
+    - more sensible data structure for header keywords.  Now works in all known
+      cases when reading and rewriting string fields.
+
+version 0.9.1
+-------------------------
+
+New features
+
+    - Added reading of image slices, e.g. `f[ext][2:25, 10:100]`
+    - Added insert_column(name, data, colnum=) method for HDUs. 2011-11-14, ESS
+    - Added a verify_checksum() method for HDU objects. 2011-10-24, ESS
+    - Headers are cleaned of required keywords before writing.  E.g. if you have
+      with fitsio.FITS(file,'rw') as fits:
+        fits.write(data, header=h)
+      Keywords like NAXIS, TTYPE* etc are removed.  This allows you to read
+      a header from a fits file and write it to another without clobbering
+      the required keywords.
+
+    - when accessing a column subset object, more metadata are shown
+        `f[ext][name]`
+    - can write None as an image for extension 0, as supported by
+      the spirit of the standard.  Similarly reading gives None in that case.
+    - the setup.py is now set up for registering versions to pypi.
+
+bug fixes
+
+    - Fixed bug that occurred sometimes when reading individual columns where a
+      few bytes were not read.  Now using the internal cfitsio buffers more
+      carefully.
+
+    - Using fits_read_tblbytes when reading full rows fixes a bug that showed
+      up in a particular file.
+
+    - required header keywords are stripped from input header objects before
+      writing.
+
+version 0.9.0 (2011-10-21)
+-------------------------
+
+This is the first "official" release. A patched version of cfitsio 3.28 is now
+bundled.  This will make it easier for folks to install, and provide a
+consistent code base with which to develop.  Thanks to Eli Rykoff for
+suggesting a bundle.  Thanks to Eli and Martin White for helping extensively
+with testing.
+
+On OS X, we now link properly with universal binaries on intel. Thanks to Eli
+Rykoff for help with OS X testing and bug fixes.
+
+New features
+
+
+    - Write and read variable length columns.  When writing a table, any fields
+      declared "object" ("O" type char) in the input array will be written to a
+      variable length column.  For numbers, this means vectors of varying
+      length.  For strings, it means varying length strings.
+
+      When reading, there are two options.  1) By default the data are read
+      into fixed length fields with padding to the maximum size in the table
+      column.  This is a "least surprise" approach, since fancy indexing and
+      other array ops will work as expected.  2) To save memory, construct the
+      FITS object with vstorage='object' to store the data as objects.  This
+      storage can also be written back out to a new FITS file with variable
+      length columns. You can also over-ride the default vstorage when calling
+      read functions.
+
+    - Write and read ascii tables.  cfitsio supports writing scalar 2- and
+      4-byte integers, floats and doubles. But for reading only 4-byte integers
+      and doubles are supported, presumably because of the ambiguity in the
+      tform fields.  Scalar strings are fully supported in both reading and
+      writing.  No array fields are supported for ascii.
+
+    - Append rows to an existing table using the append method.
+            >>> fits.write_table(data1)
+            >>> fits[-1].append(data2)
+
+    - Using the new "where" method, you can select rows in a table where an
+      input expression evaluates to true.  The table is scanned row by row
+      without a large read.  This is surprisingly fast, and useful for figuring
+      out what sections of a large file you want to extract.  It only requires
+      enough memory to hold the row indices.
+
+            >>> w=fits[ext].where('x > 3 && y < 25')
+            >>> data=fits[ext].read(rows=w)
+            >>> data=fits[ext][w]
+
+    - You can now read rows and columns from a table HDU using slice notation. e.g.
+      to read row subsets from extension 1
+            >>> fits=fitsio.FITS(filename)
+            >>> data=fits[1][:]
+            >>> data=fits[1][10:30]
+            >>> data=fits[1][10:30:2]
+
+      You can also specify a list of rows
+            >>> rows=[3,8,25]
+            >>> data=fits[1][rows]
+
+      This is equivalent to
+            >>> data=fits[1].read(rows=rows)
+
+      To get column subsets, the notation is similar.  The data are read
+      when the rows are specified.  If a sequence of columns is entered,
+      a recarray is returned, otherwise a simple array.
+            >>> data=fits[1]['x'][:]
+            >>> data=fits[1]['x','y'][3:20]
+            >>> data=fits[1][column_list][row_list]
+
+
+    - Added support for EXTVER header keywords.  When choosing an HDU by name,
+      this allows one to select among HDUs that have the same name. Thanks to
+      Eli Rykoff for suggesting this feature and helping with testing.
+
+    - Name matching for table columns and extension names is
+      case-insensitive by default.  You can turn on case sensitivity by
+      constructing the FITS object with case_sensitive=True, or sending
+      that keyword to the convenience functions read and read_header.
+
+    - Added write_checksum method to the FITSHDU class, which computes the
+      checksum for the HDU, both the data portion alone (DATASUM keyword)
+      and the checksum complement for the entire HDU (CHECKSUM).
+
+    - Added an extensive test suite.  Use this to run the tests
+        fitsio.test.test()
+
+    - Added fitsio.cfitsio_version() function, returns the cfitsio
+      version as a string.
+
+    - added read_slice method, which is used to implement the slice
+      notation introduced above.
+
+significant code changes
+
+    - Now using fits_read_tblbytes when reading all rows and columns. This
+      is just as fast but does not bypass, and thus confuse, the read buffers.
+    - Removed many direct uses of the internal cfitsio struct objects,
+      preferring to use provided access functions.  This allowed compilation
+      on older cfitsio that had different struct representations.
+
+bug fixes
+
+    - too many to list in this early release.
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644 (file)
index 0000000..3912109
--- /dev/null
@@ -0,0 +1,340 @@
+                   GNU GENERAL PUBLIC LICENSE
+                      Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                           Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+\f
+                   GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+\f
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+\f
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+\f
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                           NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                    END OF TERMS AND CONDITIONS
+\f
+           How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644 (file)
index 0000000..2e685f0
--- /dev/null
@@ -0,0 +1,5 @@
+include *.txt
+include README.md
+recursive-include cfitsio3490 *
+recursive-include patches *
+recursive-include fitsio/test_images *
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644 (file)
index 0000000..e55593d
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,490 @@
+Metadata-Version: 2.1
+Name: fitsio
+Version: 1.1.4
+Summary: A full featured python library to read from and write to FITS files.
+Home-page: https://github.com/esheldon/fitsio
+Author: Erin Scott Sheldon
+Author-email: erin.sheldon@gmail.com
+License: GPL
+Description: A python library to read from and write to FITS files.
+        
+        [![Build Status (master)](https://travis-ci.com/esheldon/fitsio.svg?branch=master)](https://travis-ci.com/esheldon/fitsio)
+        [![tests](https://github.com/esheldon/fitsio/workflows/tests/badge.svg)](https://github.com/esheldon/fitsio/actions?query=workflow%3Atests)
+        
+        ## Description
+        
+        This is a python extension written in c and python.  Data are read into
+        numerical python arrays.
+        
+        A version of cfitsio is bundled with this package; there is no need to install
+        your own, nor will this conflict with a version you have installed.
+        
+        
+        ## Some Features
+        
+        - Read from and write to image, binary, and ascii table extensions.
+        - Read arbitrary subsets of table columns and rows without loading all the data
+          to memory.
+        - Read image subsets without reading the whole image.  Write subsets to existing images.
+        - Write and read variable length table columns.
+        - Read images and tables using slice notation similar to numpy arrays.  This is like a more
+          powerful memmap, since it is column-aware for tables.
+        - Append rows to an existing table.  Delete row sets and row ranges. Resize tables,
+            or insert rows.
+        - Query the columns and rows in a table.
+        - Read and write header keywords.
+        - Read and write images in tile-compressed format (RICE,GZIP,PLIO,HCOMPRESS).
+        - Read/write gzip files directly.  Read unix compress (.Z,.zip) and bzip2 (.bz2) files.
+        - TDIM information is used to return array columns in the correct shape.
+        - Write and read string table columns, including array columns of arbitrary
+          shape.
+        - Read and write complex, bool (logical), unsigned integer, signed bytes types.
+        - Write checksums into the header and verify them.
+        - Insert new columns into tables in-place.
+        - Iterate over rows in a table.  Data are buffered for efficiency.
+        - python 3 support, including python 3 strings
+        
+        
+        ## Examples
+        
+        ```python
+        import fitsio
+        from fitsio import FITS,FITSHDR
+        import numpy as np
+        
+        # Often you just want to quickly read or write data without bothering to
+        # create a FITS object.  In that case, you can use the read and write
+        # convenience functions.
+        
+        # read all data from the first hdu that has data
+        filename='data.fits'
+        data = fitsio.read(filename)
+        
+        # read a subset of rows and columns from a table
+        data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2)
+        
+        # read the header
+        h = fitsio.read_header(filename)
+        # read both data and header
+        data,h = fitsio.read(filename, header=True)
+        
+        # open the file and write a new binary table extension with the data
+        # array, which is a numpy array with fields, or "recarray".
+        
+        data = np.zeros(10, dtype=[('id','i8'),('ra','f8'),('dec','f8')])
+        fitsio.write(filename, data)
+        
+        # Write an image to the same file. By default a new extension is
+        # added to the file.  use clobber=True to overwrite an existing file
+        # instead.  To append rows to an existing table, see below.
+        
+        fitsio.write(filename, image)
+        
+        # NOTE when reading row subsets, the data must still be read from disk.
+        # This is most efficient if the data are read in the order they appear in
+        # the file.  For this reason, the rows are always returned in row-sorted
+        # order.
+        
+        #
+        # the FITS class gives you the ability to explore the data, and gives
+        # more control
+        #
+        
+        # open a FITS file for reading and explore
+        fits=fitsio.FITS('data.fits')
+        
+        # see what is in here; the FITS object prints itself
+        print(fits)
+        
+        file: data.fits
+        mode: READONLY
+        extnum hdutype         hduname
+        0      IMAGE_HDU
+        1      BINARY_TBL      mytable
+        
+        # at the python or ipython prompt the fits object will
+        # print itself
+        >>> fits
+        file: data.fits
+        ... etc
+        
+        # explore the extensions, either by extension number or
+        # extension name if available
+        >>> fits[0]
+        
+        file: data.fits
+        extension: 0
+        type: IMAGE_HDU
+        image info:
+          data type: f8
+          dims: [4096,2048]
+        
+        # by name; can also use fits[1]
+        >>> fits['mytable']
+        
+        file: data.fits
+        extension: 1
+        type: BINARY_TBL
+        extname: mytable
+        rows: 4328342
+        column info:
+          i1scalar            u1
+          f                   f4
+          fvec                f4  array[2]
+          darr                f8  array[3,2]
+          dvarr               f8  varray[10]
+          s                   S5
+          svec                S6  array[3]
+          svar                S0  vstring[8]
+          sarr                S2  array[4,3]
+        
+        # See bottom for how to get more information for an extension
+        
+        # [-1] refers to the last HDU
+        >>> fits[-1]
+        ...
+        
+        # if there are multiple HDUs with the same name, and an EXTVER
+        # is set, you can use it.  Here extver=2
+        #    fits['mytable',2]
+        
+        
+        # read the image from extension zero
+        img = fits[0].read()
+        img = fits[0][:,:]
+        
+        # read a subset of the image without reading the whole image
+        img = fits[0][25:35, 45:55]
+        
+        
+        # read all rows and columns from a binary table extension
+        data = fits[1].read()
+        data = fits['mytable'].read()
+        data = fits[1][:]
+        
+        # read a subset of rows and columns. By default uses a case-insensitive
+        # match. The result retains the names with original case.  If columns is a
+        # sequence, a numpy array with fields, or recarray is returned
+        data = fits[1].read(rows=[1,5], columns=['index','x','y'])
+        
+        # Similar but using slice notation
+        # row subsets
+        data = fits[1][10:20]
+        data = fits[1][10:20:2]
+        data = fits[1][[1,5,18]]
+        
+        # Using EXTNAME and EXTVER values
+        data = fits['SCI',2][10:20]
+        
+        # Slicing with reverse (flipped) striding
+        data = fits[1][40:25]
+        data = fits[1][40:25:-5]
+        
+        # all rows of column 'x'
+        data = fits[1]['x'][:]
+        
+        # Read a few columns at once. This is more efficient than a separate
+        # read for each column
+        data = fits[1]['x','y'][:]
+        
+        # General column and row subsets.  As noted above, the data are returned
+        # in row sorted order for efficiency reasons.
+        columns=['index','x','y']
+        rows=[1,5]
+        data = fits[1][columns][rows]
+        
+        # iterate over rows in a table hdu
+        # faster if we buffer some rows, let's buffer 1000 at a time
+        fits=fitsio.FITS(filename,iter_row_buffer=1000)
+        for row in fits[1]:
+            print(row)
+        
+        # iterate over HDUs in a FITS object
+        for hdu in fits:
+            data=hdu.read()
+        
+        # Note dvarr shows type varray[10] and svar shows type vstring[8]. These
+        # are variable length columns and the number specified is the maximum size.
+        # By default they are read into fixed-length fields in the output array.
+        # You can over-ride this by constructing the FITS object with the vstorage
+        # keyword or specifying vstorage when reading.  Sending vstorage='object'
+        # will store the data in variable size object fields to save memory; the
+        # default is vstorage='fixed'.  Object fields can also be written out to a
+        # new FITS file as variable length to save disk space.
+        
+        fits = fitsio.FITS(filename,vstorage='object')
+        # OR
+        data = fits[1].read(vstorage='object')
+        print(data['dvarr'].dtype)
+            dtype('object')
+        
+        
+        # you can grab a FITS HDU object to simplify notation
+        hdu1 = fits[1]
+        data = hdu1['x','y'][35:50]
+        
+        # get rows that satisfy the input expression.  See "Row Filtering
+        # Specification" in the cfitsio manual (note no temporary table is
+        # created in this case, contrary to the cfitsio docs)
+        w=fits[1].where("x > 0.25 && y < 35.0")
+        data = fits[1][w]
+        
+        # read the header
+        h = fits[0].read_header()
+        print(h['BITPIX'])
+            -64
+        
+        fits.close()
+        
+        
+        # now write some data
+        fits = FITS('test.fits','rw')
+        
+        
+        # create a rec array.  Note vstr
+        # is a variable length string
+        nrows=35
+        data = np.zeros(nrows, dtype=[('index','i4'),('vstr','O'),('x','f8'),
+                                      ('arr','f4',(3,4))])
+        data['index'] = np.arange(nrows,dtype='i4')
+        data['x'] = np.random.random(nrows)
+        data['vstr'] = [str(i) for i in range(nrows)]
+        data['arr'] = np.arange(nrows*3*4,dtype='f4').reshape(nrows,3,4)
+        
+        # create a new table extension and write the data
+        fits.write(data)
+        
+        # can also be a list of ordinary arrays if you send the names
+        array_list=[xarray,yarray,namearray]
+        names=['x','y','name']
+        fits.write(array_list, names=names)
+        
+        # similarly a dict of arrays
+        fits.write(dict_of_arrays)
+        fits.write(dict_of_arrays, names=names) # control name order
+        
+        # append more rows to the table.  The fields in data2 should match columns
+        # in the table.  missing columns will be filled with zeros
+        fits[-1].append(data2)
+        
+        # insert a new column into a table
+        fits[-1].insert_column('newcol', data)
+        
+        # insert with a specific colnum
+        fits[-1].insert_column('newcol', data, colnum=2)
+        
+        # overwrite rows
+        fits[-1].write(data)
+        
+        # overwrite starting at a particular row. The table will grow if needed
+        fits[-1].write(data, firstrow=350)
+        
+        
+        # create an image
+        img=np.arange(2*3,dtype='i4').reshape(2,3)
+        
+        # write an image in a new HDU (if this is a new file, the primary HDU)
+        fits.write(img)
+        
+        # write an image with rice compression
+        fits.write(img, compress='rice')
+        
+        # control the compression
+        fimg=np.random.normal(size=2*3).reshape(2, 3)
+        fits.write(fimg, compress='rice', qlevel=16, qmethod='SUBTRACTIVE_DITHER_2')
+        
+        # lossless gzip compression for integers or floating point
+        fits.write(img, compress='gzip', qlevel=None)
+        fits.write(fimg, compress='gzip', qlevel=None)
+        
+        # overwrite the image
+        fits[ext].write(img2)
+        
+        # write into an existing image, starting at the location [300,400]
+        # the image will be expanded if needed
+        fits[ext].write(img3, start=[300,400])
+        
+        # change the shape of the image on disk
+        fits[ext].reshape([250,100])
+        
+        # add checksums for the data
+        fits[-1].write_checksum()
+        
+        # can later verify data integrity
+        fits[-1].verify_checksum()
+        
+        # you can also write a header at the same time.  The header can be
+        #   - a simple dict (no comments)
+        #   - a list of dicts with 'name','value','comment' fields
+        #   - a FITSHDR object
+        
+        hdict = {'somekey': 35, 'location': 'kitt peak'}
+        fits.write(data, header=hdict)
+        hlist = [{'name':'observer', 'value':'ES', 'comment':'who'},
+                 {'name':'location','value':'CTIO'},
+                 {'name':'photometric','value':True}]
+        fits.write(data, header=hlist)
+        hdr=FITSHDR(hlist)
+        fits.write(data, header=hdr)
+        
+        # you can add individual keys to an existing HDU
+        fits[1].write_key(name, value, comment="my comment")
+        
+        # Write multiple header keys to an existing HDU. Here records
+        # is the same as sent with header= above
+        fits[1].write_keys(records)
+        
+        # write special COMMENT fields
+        fits[1].write_comment("observer JS")
+        fits[1].write_comment("we had good weather")
+        
+        # write special history fields
+        fits[1].write_history("processed with software X")
+        fits[1].write_history("re-processed with software Y")
+        
+        fits.close()
+        
+        # using a context, the file is closed automatically after leaving the block
+        with FITS('path/to/file') as fits:
+            data = fits[ext].read()
+        
+            # you can check whether an extension exists using "in":
+            if 'blah' in fits:
+                data=fits['blah'].read()
+            if 2 in fits:
+                data=fits[2].read()
+        
+        # methods to get more information about an extension.  For extension 1:
+        fits[1].get_info()             # lots of info about the extension
+        fits[1].has_data()             # returns True if data is present in extension
+        fits[1].get_extname()
+        fits[1].get_extver()
+        fits[1].get_extnum()           # return zero-offset extension number
+        fits[1].get_exttype()          # 'BINARY_TBL' or 'ASCII_TBL' or 'IMAGE_HDU'
+        fits[1].get_offsets()          # byte offsets (header_start, data_start, data_end)
+        fits[1].is_compressed()        # for images; True if tile-compressed
+        fits[1].get_colnames()         # for tables
+        fits[1].get_colname(colnum)    # for tables, find the name from column number
+        fits[1].get_nrows()            # for tables
+        fits[1].get_rec_dtype()        # for tables
+        fits[1].get_rec_column_descr() # for tables
+        fits[1].get_vstorage()         # for tables, storage mechanism for variable
+                                       # length columns
+
+        # public attributes you are free to change as needed
+        fits[1].lower           # if True, lower case colnames on output
+        fits[1].upper           # if True, upper case colnames on output
+        fits[1].case_sensitive  # if True, names are matched case sensitively
+        ```
+        
+        
+        ## Installation
+        
+        The easiest way is using pip or conda. To get the latest release
+        
+            pip install fitsio
+        
+            # update fitsio (and everything else)
+            pip install fitsio --upgrade
+        
+            # if pip refuses to update to a newer version
+            pip install fitsio --upgrade --ignore-installed
+        
+            # if you only want to upgrade fitsio
+            pip install fitsio --no-deps --upgrade --ignore-installed
+        
+            # for conda, use conda-forge
+            conda install -c conda-forge fitsio
+        
+        You can also get the latest source tarball release from
+        
+            https://pypi.python.org/pypi/fitsio
+        
+        or the bleeding-edge source from GitHub using git. To check out
+        the code for the first time
+        
+            git clone https://github.com/esheldon/fitsio.git
+        
+        Or, at a later time, to update to the latest
+
+            cd fitsio
+            git pull
+
+        If you downloaded the source tarball instead, use `tar xvfz` to unpack
+        it, enter the fitsio directory and type
+        
+            python setup.py install
+        
+        optionally with a prefix
+        
+            python setup.py install --prefix=/some/path
+        
+        ## Requirements
+        
+        - python 2 or python 3
+        - a C compiler and build tools like `make`, `patch`, etc.
+        - numpy (See the note below. Generally, numpy 1.11 or later is better.)
+        
+        
+        ### Do not use numpy 1.10.0 or 1.10.1
+        
+        There is a serious performance regression in numpy 1.10 that results
+        in fitsio running tens to hundreds of times slower.  A fix may be
+        forthcoming in a later release.  Please comment here if this
+        has already impacted your work https://github.com/numpy/numpy/issues/6467
+        
+        
+        ## Tests
+        
+        For a fully supported build, all of the unit tests should pass.
+        
+        ```bash
+        python -c "import fitsio; fitsio.test.test()"
+        ```
+        
+        Some tests may fail if certain libraries, such as bzip2, are not
+        available.  Such a failure only means that bzipped files cannot be
+        read; other functionality is unaffected.
+        
+        ## Notes on Usage and Features
+        
+        ### cfitsio bundling
+        
+        We bundle cfitsio partly because many deployed versions of cfitsio in the
+        wild do not have support for interesting features like tiled image compression.
+        Bundling a version that meets our needs is a safe alternative.
+        
+        ### array ordering
+        
+        Since numpy uses C order while FITS uses Fortran order, we write the
+        TDIM and image dimensions in reverse order, but write the data as is.
+        Conversely, we reverse the dims read from the header when creating
+        the numpy dtype, but read the data as is.
+        
+        ### `distutils` vs `setuptools`
+        
+        As of version `1.0.0`, `fitsio` has been transitioned to `setuptools` for packaging
+        and installation. There are many reasons to do this (and to not do this). However,
+        at a practical level, what this means for you is that you may have trouble uninstalling
+        older versions with `pip` via `pip uninstall fitsio`. If so, the best
+        thing to do is to remove the files manually. See this [stackoverflow question](https://stackoverflow.com/questions/402359/how-do-you-uninstall-a-python-package-that-was-installed-using-distutils)
+        for example.
+        
+        ### python 3 strings
+        
+        As of version `1.0.0`, fitsio supports Python 3 strings natively. This
+        means that under Python 3, native strings are correctly read from and
+        written to FITS files, and all byte string columns are treated as
+        ASCII-encoded unicode strings as well. For FITS files written with a
+        previous version of fitsio, the data in Python 3 will now come back as
+        a string and not a byte string. Note that this
+        support is not the same as full unicode support. Internally, fitsio only supports
+        the ASCII character set.
+        
+        ## TODO
+        
+        - HDU groups: does anyone use these? If so, open an issue!
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Topic :: Scientific/Engineering :: Astronomy
+Classifier: Intended Audience :: Science/Research
+Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
diff --git a/README.md b/README.md
new file mode 100644 (file)
index 0000000..96d3ab9
--- /dev/null
+++ b/README.md
@@ -0,0 +1,475 @@
+A python library to read from and write to FITS files.
+
+[![Build Status (master)](https://travis-ci.com/esheldon/fitsio.svg?branch=master)](https://travis-ci.com/esheldon/fitsio)
+[![tests](https://github.com/esheldon/fitsio/workflows/tests/badge.svg)](https://github.com/esheldon/fitsio/actions?query=workflow%3Atests)
+
+## Description
+
+This is a python extension written in C and python.  Data are read into
+numerical python arrays.
+
+A version of cfitsio is bundled with this package; there is no need to install
+your own, nor will it conflict with a version you have installed.
+
+
+## Some Features
+
+- Read from and write to image, binary, and ascii table extensions.
+- Read arbitrary subsets of table columns and rows without loading all the data
+  to memory.
+- Read image subsets without reading the whole image.  Write subsets to existing images.
+- Write and read variable length table columns.
+- Read images and tables using slice notation similar to numpy arrays.  This is like a more
+  powerful memmap, since it is column-aware for tables.
+- Append rows to an existing table.  Delete row sets and row ranges. Resize tables,
+    or insert rows.
+- Query the columns and rows in a table.
+- Read and write header keywords.
+- Read and write images in tile-compressed format (RICE,GZIP,PLIO,HCOMPRESS).
+- Read/write gzip files directly.  Read unix compress (.Z,.zip) and bzip2 (.bz2) files.
+- TDIM information is used to return array columns in the correct shape.
+- Write and read string table columns, including array columns of arbitrary
+  shape.
+- Read and write complex, bool (logical), unsigned integer, signed bytes types.
+- Write checksums into the header and verify them.
+- Insert new columns into tables in-place.
+- Iterate over rows in a table.  Data are buffered for efficiency.
+- python 3 support, including python 3 strings
+
+
+## Examples
+
+```python
+import fitsio
+from fitsio import FITS,FITSHDR
+
+# Often you just want to quickly read or write data without bothering to
+# create a FITS object.  In that case, you can use the read and write
+# convenience functions.
+
+# read all data from the first hdu that has data
+filename='data.fits'
+data = fitsio.read(filename)
+
+# read a subset of rows and columns from a table
+data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2)
+
+# read the header
+h = fitsio.read_header(filename)
+# read both data and header
+data,h = fitsio.read(filename, header=True)
+
+# open the file and write a new binary table extension with the data
+# array, which is a numpy array with fields, or "recarray".
+
+data = np.zeros(10, dtype=[('id','i8'),('ra','f8'),('dec','f8')])
+fitsio.write(filename, data)
+
+# Write an image to the same file. By default a new extension is
+# added to the file.  Use clobber=True to overwrite an existing file
+# instead.  To append rows to an existing table, see below.
+
+fitsio.write(filename, image)
+
+# NOTE when reading row subsets, the data must still be read from disk.
+# This is most efficient if the data are read in the order they appear in
+# the file.  For this reason, the rows are always returned in row-sorted
+# order.
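+
+# For example (a sketch using the same hypothetical file as above), rows
+# requested out of order still come back sorted by row number
+data = fitsio.read(filename, rows=[1001, 35], ext=2)
+# data now corresponds to rows [35, 1001], in that order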
+
+#
+# the FITS class gives you the ability to explore the data, and gives
+# more control
+#
+
+# open a FITS file for reading and explore
+fits=fitsio.FITS('data.fits')
+
+# see what is in here; the FITS object prints itself
+print(fits)
+
+file: data.fits
+mode: READONLY
+extnum hdutype         hduname
+0      IMAGE_HDU
+1      BINARY_TBL      mytable
+
+# at the python or ipython prompt the fits object will
+# print itself
+>>> fits
+file: data.fits
+... etc
+
+# explore the extensions, either by extension number or
+# extension name if available
+>>> fits[0]
+
+file: data.fits
+extension: 0
+type: IMAGE_HDU
+image info:
+  data type: f8
+  dims: [4096,2048]
+
+# by name; can also use fits[1]
+>>> fits['mytable']
+
+file: data.fits
+extension: 1
+type: BINARY_TBL
+extname: mytable
+rows: 4328342
+column info:
+  i1scalar            u1
+  f                   f4
+  fvec                f4  array[2]
+  darr                f8  array[3,2]
+  dvarr               f8  varray[10]
+  s                   S5
+  svec                S6  array[3]
+  svar                S0  vstring[8]
+  sarr                S2  array[4,3]
+
+# See bottom for how to get more information for an extension
+
+# [-1] refers to the last HDU
+>>> fits[-1]
+...
+
+# if there are multiple HDUs with the same name, and an EXTVER
+# is set, you can use it.  Here extver=2
+#    fits['mytable',2]
+
+
+# read the image from extension zero
+img = fits[0].read()
+img = fits[0][:,:]
+
+# read a subset of the image without reading the whole image
+img = fits[0][25:35, 45:55]
+
+
+# read all rows and columns from a binary table extension
+data = fits[1].read()
+data = fits['mytable'].read()
+data = fits[1][:]
+
+# read a subset of rows and columns. By default the match is
+# case-insensitive; the result retains the names with their original case.
+# If columns is a sequence, a numpy array with fields (a recarray) is
+# returned
+data = fits[1].read(rows=[1,5], columns=['index','x','y'])
+
+# Similar but using slice notation
+# row subsets
+data = fits[1][10:20]
+data = fits[1][10:20:2]
+data = fits[1][[1,5,18]]
+
+# Using EXTNAME and EXTVER values
+data = fits['SCI',2][10:20]
+
+# Slicing with reverse (flipped) striding
+data = fits[1][40:25]
+data = fits[1][40:25:-5]
+
+# all rows of column 'x'
+data = fits[1]['x'][:]
+
+# Read a few columns at once. This is more efficient than a separate read
+# for each column
+data = fits[1]['x','y'][:]
+
+# General column and row subsets.  As noted above, the data are returned
+# in row sorted order for efficiency reasons.
+columns=['index','x','y']
+rows=[1,5]
+data = fits[1][columns][rows]
+
+# iterate over rows in a table hdu
+# faster if we buffer some rows, let's buffer 1000 at a time
+fits=fitsio.FITS(filename,iter_row_buffer=1000)
+for row in fits[1]:
+    print(row)
+
+# iterate over HDUs in a FITS object
+for hdu in fits:
+    data=hdu.read()
+
+# Note dvarr shows type varray[10] and svar shows type vstring[8]. These
+# are variable length columns and the number specified is the maximum size.
+# By default they are read into fixed-length fields in the output array.
+# You can over-ride this by constructing the FITS object with the vstorage
+# keyword or specifying vstorage when reading.  Sending vstorage='object'
+# will store the data in variable size object fields to save memory; the
+# default is vstorage='fixed'.  Object fields can also be written out to a
+# new FITS file as variable length to save disk space.
+
+fits = fitsio.FITS(filename,vstorage='object')
+# OR
+data = fits[1].read(vstorage='object')
+print(data['dvarr'].dtype)
+    dtype('object')
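+
+# object fields can also be written back out; they are stored as variable
+# length columns in the new file (a sketch; the output file name is
+# hypothetical)
+with fitsio.FITS('vcopy.fits', 'rw', clobber=True) as fout:
+    fout.write(data)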
+
+
+# you can grab a FITS HDU object to simplify notation
+hdu1 = fits[1]
+data = hdu1['x','y'][35:50]
+
+# get rows that satisfy the input expression.  See "Row Filtering
+# Specification" in the cfitsio manual (note no temporary table is
+# created in this case, contrary to the cfitsio docs)
+w=fits[1].where("x > 0.25 && y < 35.0")
+data = fits[1][w]
+
+# read the header
+h = fits[0].read_header()
+print(h['BITPIX'])
+    -64
+
+fits.close()
+
+
+# now write some data
+fits = FITS('test.fits','rw')
+
+
+# create a rec array.  Note vstr
+# is a variable length string
+nrows=35
+data = np.zeros(nrows, dtype=[('index','i4'),('vstr','O'),('x','f8'),
+                              ('arr','f4',(3,4))])
+data['index'] = np.arange(nrows,dtype='i4')
+data['x'] = np.random.random(nrows)
+data['vstr'] = [str(i) for i in range(nrows)]
+data['arr'] = np.arange(nrows*3*4,dtype='f4').reshape(nrows,3,4)
+
+# create a new table extension and write the data
+fits.write(data)
+
+# can also be a list of ordinary arrays if you send the names
+array_list=[xarray,yarray,namearray]
+names=['x','y','name']
+fits.write(array_list, names=names)
+
+# similarly a dict of arrays
+fits.write(dict_of_arrays)
+fits.write(dict_of_arrays, names=names) # control name order
+
+# append more rows to the table.  The fields in data2 should match columns
+# in the table.  missing columns will be filled with zeros
+fits[-1].append(data2)
+
+# insert a new column into a table
+fits[-1].insert_column('newcol', data)
+
+# insert with a specific colnum
+fits[-1].insert_column('newcol', data, colnum=2)
+
+# overwrite rows
+fits[-1].write(data)
+
+# overwrite starting at a particular row. The table will grow if needed
+fits[-1].write(data, firstrow=350)
+
+
+# create an image
+img=np.arange(2*3,dtype='i4').reshape(2,3)
+
+# write an image in a new HDU (if this is a new file, the primary HDU)
+fits.write(img)
+
+# write an image with rice compression
+fits.write(img, compress='rice')
+
+# control the compression
+fimg=np.random.normal(size=2*3).reshape(2, 3)
+fits.write(fimg, compress='rice', qlevel=16, qmethod='SUBTRACTIVE_DITHER_2')
+
+# lossless gzip compression for integers or floating point
+fits.write(img, compress='gzip', qlevel=None)
+fits.write(fimg, compress='gzip', qlevel=None)
+
+# overwrite the image
+fits[ext].write(img2)
+
+# write into an existing image, starting at the location [300,400]
+# the image will be expanded if needed
+fits[ext].write(img3, start=[300,400])
+
+# change the shape of the image on disk
+fits[ext].reshape([250,100])
+
+# add checksums for the data
+fits[-1].write_checksum()
+
+# can later verify data integrity
+fits[-1].verify_checksum()
+
+# you can also write a header at the same time.  The header can be
+#   - a simple dict (no comments)
+#   - a list of dicts with 'name','value','comment' fields
+#   - a FITSHDR object
+
+hdict = {'somekey': 35, 'location': 'kitt peak'}
+fits.write(data, header=hdict)
+hlist = [{'name':'observer', 'value':'ES', 'comment':'who'},
+         {'name':'location','value':'CTIO'},
+         {'name':'photometric','value':True}]
+fits.write(data, header=hlist)
+hdr=FITSHDR(hlist)
+fits.write(data, header=hdr)
+
+# you can add individual keys to an existing HDU
+fits[1].write_key(name, value, comment="my comment")
+
+# Write multiple header keys to an existing HDU. Here records
+# is the same as sent with header= above
+fits[1].write_keys(records)
+
+# write special COMMENT fields
+fits[1].write_comment("observer JS")
+fits[1].write_comment("we had good weather")
+
+# write special history fields
+fits[1].write_history("processed with software X")
+fits[1].write_history("re-processed with software Y")
+
+fits.close()
+
+# using a context, the file is closed automatically after leaving the block
+with FITS('path/to/file') as fits:
+    data = fits[ext].read()
+
+    # you can check whether an extension exists using "in":
+    if 'blah' in fits:
+        data=fits['blah'].read()
+    if 2 in fits:
+        data=fits[2].read()
+
+# methods to get more information about an extension.  For extension 1:
+fits[1].get_info()             # lots of info about the extension
+fits[1].has_data()             # returns True if data is present in extension
+fits[1].get_extname()
+fits[1].get_extver()
+fits[1].get_extnum()           # return zero-offset extension number
+fits[1].get_exttype()          # 'BINARY_TBL' or 'ASCII_TBL' or 'IMAGE_HDU'
+fits[1].get_offsets()          # byte offsets (header_start, data_start, data_end)
+fits[1].is_compressed()        # for images; True if tile-compressed
+fits[1].get_colnames()         # for tables
+fits[1].get_colname(colnum)    # for tables, find the name from column number
+fits[1].get_nrows()            # for tables
+fits[1].get_rec_dtype()        # for tables
+fits[1].get_rec_column_descr() # for tables
+fits[1].get_vstorage()         # for tables, storage mechanism for variable
+                               # length columns
+
+# public attributes you are free to change as needed
+fits[1].lower           # if True, lower case colnames on output
+fits[1].upper           # if True, upper case colnames on output
+fits[1].case_sensitive  # if True, names are matched case sensitively
+```
+
+
+## Installation
+
+The easiest way is using pip or conda. To get the latest release
+
+    pip install fitsio
+
+    # update fitsio (and everything else)
+    pip install fitsio --upgrade
+
+    # if pip refuses to update to a newer version
+    pip install fitsio --upgrade --ignore-installed
+
+    # if you only want to upgrade fitsio
+    pip install fitsio --no-deps --upgrade --ignore-installed
+
+    # for conda, use conda-forge
+    conda install -c conda-forge fitsio
+
+You can also get the latest source tarball release from
+
+    https://pypi.python.org/pypi/fitsio
+
+or the bleeding-edge source from GitHub using git. To check out
+the code for the first time
+
+    git clone https://github.com/esheldon/fitsio.git
+
+Or, at a later time, to update to the latest
+
+    cd fitsio
+    git pull
+
+If you downloaded the source tarball instead, use `tar xvfz` to unpack it,
+enter the fitsio directory and type
+
+    python setup.py install
+
+optionally with a prefix
+
+    python setup.py install --prefix=/some/path
+
+## Requirements
+
+- python 2 or python 3
+- a C compiler and build tools like `make`, `patch`, etc.
+- numpy (See the note below. Generally, numpy 1.11 or later is better.)
+
+
+### Do not use numpy 1.10.0 or 1.10.1
+
+There is a serious performance regression in numpy 1.10 that results
+in fitsio running tens to hundreds of times slower.  A fix may be
+forthcoming in a later release.  Please comment here if this
+has already impacted your work https://github.com/numpy/numpy/issues/6467
+
+
+## Tests
+
+For a fully supported build, all of the unit tests should pass.
+
+```bash
+python -c "import fitsio; fitsio.test.test()"
+```
+
+Some tests may fail if certain libraries, such as bzip2, are not
+available.  Such a failure only means that bzipped files cannot be
+read; other functionality is unaffected.
+
+## Notes on Usage and Features
+
+### cfitsio bundling
+
+We bundle cfitsio partly because many deployed versions of cfitsio in the
+wild do not have support for interesting features like tiled image compression.
+Bundling a version that meets our needs is a safe alternative.
+
+### array ordering
+
+Since numpy uses C order while FITS uses Fortran order, we write the TDIM
+and image dimensions in reverse order, but write the data as is.  Conversely,
+we reverse the dims read from the header when creating the numpy dtype, but
+read the data as is.
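+
+For example, here is a minimal sketch of how this plays out for a 2x3 image
+(the file name is hypothetical):
+
+```python
+import numpy as np
+import fitsio
+
+# a C-ordered numpy image with shape (2, 3)
+img = np.arange(6, dtype='i4').reshape(2, 3)
+fitsio.write('dims.fits', img, clobber=True)
+
+# FITS lists the fastest-varying axis first, so the header axes appear
+# reversed relative to the numpy shape
+h = fitsio.read_header('dims.fits', ext=0)
+print(h['NAXIS1'], h['NAXIS2'])  # 3 2
+
+# on read, the dims are flipped back, recovering the numpy shape
+print(fitsio.read('dims.fits').shape)  # (2, 3)
+```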
+
+### `distutils` vs `setuptools`
+
+As of version `1.0.0`, `fitsio` has been transitioned to `setuptools` for packaging
+and installation. There are many reasons to do this (and to not do this). However,
+at a practical level, what this means for you is that you may have trouble uninstalling
+older versions with `pip` via `pip uninstall fitsio`. If so, the best thing to
+do is to remove the files manually. See this [stackoverflow question](https://stackoverflow.com/questions/402359/how-do-you-uninstall-a-python-package-that-was-installed-using-distutils)
+for example.
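+
+If you are unsure where an old copy lives, a minimal sketch for locating it
+(assuming the old version still imports) is:
+
+```python
+import fitsio
+
+# the location of whichever copy currently imports; remove its files by
+# hand if `pip uninstall fitsio` cannot
+print(fitsio.__file__)
+```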
+
+### python 3 strings
+
+As of version `1.0.0`, fitsio supports Python 3 strings natively. This means
+that under Python 3, native strings are correctly read from and written to
+FITS files, and all byte string columns are treated as ASCII-encoded unicode
+strings as well. For FITS files written with a previous version of fitsio, the
+data in Python 3 will now come back as a string and not a byte string. Note that this
+support is not the same as full unicode support. Internally, fitsio only supports
+the ASCII character set.
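+
+A minimal sketch of the round trip under Python 3 (the file name is
+hypothetical):
+
+```python
+import numpy as np
+import fitsio
+
+data = np.zeros(2, dtype=[('name', 'S5')])
+data['name'] = ['abc', 'de']
+fitsio.write('strs.fits', data, clobber=True)
+
+# the column comes back as native Python 3 strings, not bytes
+out = fitsio.read('strs.fits')
+print(out['name'][0])  # 'abc', a str
+```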
+
+## TODO
+
+- HDU groups: does anyone use these? If so, open an issue!
diff --git a/fitsio.egg-info/PKG-INFO b/fitsio.egg-info/PKG-INFO
new file mode 100644 (file)
index 0000000..e55593d
--- /dev/null
@@ -0,0 +1,490 @@
+Metadata-Version: 2.1
+Name: fitsio
+Version: 1.1.4
+Summary: A full featured python library to read from and write to FITS files.
+Home-page: https://github.com/esheldon/fitsio
+Author: Erin Scott Sheldon
+Author-email: erin.sheldon@gmail.com
+License: GPL
+Description: A python library to read from and write to FITS files.
+        
+        [![Build Status (master)](https://travis-ci.com/esheldon/fitsio.svg?branch=master)](https://travis-ci.com/esheldon/fitsio)
+        [![tests](https://github.com/esheldon/fitsio/workflows/tests/badge.svg)](https://github.com/esheldon/fitsio/actions?query=workflow%3Atests)
+        
+        ## Description
+        
+        This is a python extension written in C and python.  Data are read into
+        numerical python arrays.
+        
+        A version of cfitsio is bundled with this package; there is no need to install
+        your own, nor will it conflict with a version you have installed.
+        
+        
+        ## Some Features
+        
+        - Read from and write to image, binary, and ascii table extensions.
+        - Read arbitrary subsets of table columns and rows without loading all the data
+          to memory.
+        - Read image subsets without reading the whole image.  Write subsets to existing images.
+        - Write and read variable length table columns.
+        - Read images and tables using slice notation similar to numpy arrays.  This is like a more
+          powerful memmap, since it is column-aware for tables.
+        - Append rows to an existing table.  Delete row sets and row ranges. Resize tables,
+            or insert rows.
+        - Query the columns and rows in a table.
+        - Read and write header keywords.
+        - Read and write images in tile-compressed format (RICE,GZIP,PLIO,HCOMPRESS).
+        - Read/write gzip files directly.  Read unix compress (.Z,.zip) and bzip2 (.bz2) files.
+        - TDIM information is used to return array columns in the correct shape.
+        - Write and read string table columns, including array columns of arbitrary
+          shape.
+        - Read and write complex, bool (logical), unsigned integer, signed bytes types.
+        - Write checksums into the header and verify them.
+        - Insert new columns into tables in-place.
+        - Iterate over rows in a table.  Data are buffered for efficiency.
+        - python 3 support, including python 3 strings
+        
+        
+        ## Examples
+        
+        ```python
+        import fitsio
+        from fitsio import FITS,FITSHDR
+        
+        # Often you just want to quickly read or write data without bothering to
+        # create a FITS object.  In that case, you can use the read and write
+        # convenience functions.
+        
+        # read all data from the first hdu that has data
+        filename='data.fits'
+        data = fitsio.read(filename)
+        
+        # read a subset of rows and columns from a table
+        data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2)
+        
+        # read the header
+        h = fitsio.read_header(filename)
+        # read both data and header
+        data,h = fitsio.read(filename, header=True)
+        
+        # open the file and write a new binary table extension with the data
+        # array, which is a numpy array with fields, or "recarray".
+        
+        data = np.zeros(10, dtype=[('id','i8'),('ra','f8'),('dec','f8')])
+        fitsio.write(filename, data)
+        
+        # Write an image to the same file. By default a new extension is
+        # added to the file.  Use clobber=True to overwrite an existing file
+        # instead.  To append rows to an existing table, see below.
+        
+        fitsio.write(filename, image)
+        
+        # NOTE when reading row subsets, the data must still be read from disk.
+        # This is most efficient if the data are read in the order they appear in
+        # the file.  For this reason, the rows are always returned in row-sorted
+        # order.
+        
+        #
+        # the FITS class gives you the ability to explore the data, and gives
+        # more control
+        #
+        
+        # open a FITS file for reading and explore
+        fits=fitsio.FITS('data.fits')
+        
+        # see what is in here; the FITS object prints itself
+        print(fits)
+        
+        file: data.fits
+        mode: READONLY
+        extnum hdutype         hduname
+        0      IMAGE_HDU
+        1      BINARY_TBL      mytable
+        
+        # at the python or ipython prompt the fits object will
+        # print itself
+        >>> fits
+        file: data.fits
+        ... etc
+        
+        # explore the extensions, either by extension number or
+        # extension name if available
+        >>> fits[0]
+        
+        file: data.fits
+        extension: 0
+        type: IMAGE_HDU
+        image info:
+          data type: f8
+          dims: [4096,2048]
+        
+        # by name; can also use fits[1]
+        >>> fits['mytable']
+        
+        file: data.fits
+        extension: 1
+        type: BINARY_TBL
+        extname: mytable
+        rows: 4328342
+        column info:
+          i1scalar            u1
+          f                   f4
+          fvec                f4  array[2]
+          darr                f8  array[3,2]
+          dvarr               f8  varray[10]
+          s                   S5
+          svec                S6  array[3]
+          svar                S0  vstring[8]
+          sarr                S2  array[4,3]
+        
+        # See bottom for how to get more information for an extension
+        
+        # [-1] refers to the last HDU
+        >>> fits[-1]
+        ...
+        
+        # if there are multiple HDUs with the same name, and an EXTVER
+        # is set, you can use it.  Here extver=2
+        #    fits['mytable',2]
+        
+        
+        # read the image from extension zero
+        img = fits[0].read()
+        img = fits[0][:,:]
+        
+        # read a subset of the image without reading the whole image
+        img = fits[0][25:35, 45:55]
+        
+        
+        # read all rows and columns from a binary table extension
+        data = fits[1].read()
+        data = fits['mytable'].read()
+        data = fits[1][:]
+        
+        # read a subset of rows and columns. By default the match is
+        # case-insensitive; the result retains the names with their original
+        # case.  If columns is a sequence, a numpy array with fields (a
+        # recarray) is returned
+        data = fits[1].read(rows=[1,5], columns=['index','x','y'])
+        
+        # Similar but using slice notation
+        # row subsets
+        data = fits[1][10:20]
+        data = fits[1][10:20:2]
+        data = fits[1][[1,5,18]]
+        
+        # Using EXTNAME and EXTVER values
+        data = fits['SCI',2][10:20]
+        
+        # Slicing with reverse (flipped) striding
+        data = fits[1][40:25]
+        data = fits[1][40:25:-5]
+        
+        # all rows of column 'x'
+        data = fits[1]['x'][:]
+        
+        # Read a few columns at once. This is more efficient than a separate
+        # read for each column
+        data = fits[1]['x','y'][:]
+        
+        # General column and row subsets.  As noted above, the data are returned
+        # in row sorted order for efficiency reasons.
+        columns=['index','x','y']
+        rows=[1,5]
+        data = fits[1][columns][rows]
+        
+        # iterate over rows in a table hdu
+        # faster if we buffer some rows, let's buffer 1000 at a time
+        fits=fitsio.FITS(filename,iter_row_buffer=1000)
+        for row in fits[1]:
+            print(row)
+        
+        # iterate over HDUs in a FITS object
+        for hdu in fits:
+            data=hdu.read()
+        
+        # Note dvarr shows type varray[10] and svar shows type vstring[8]. These
+        # are variable length columns and the number specified is the maximum size.
+        # By default they are read into fixed-length fields in the output array.
+        # You can over-ride this by constructing the FITS object with the vstorage
+        # keyword or specifying vstorage when reading.  Sending vstorage='object'
+        # will store the data in variable size object fields to save memory; the
+        # default is vstorage='fixed'.  Object fields can also be written out to a
+        # new FITS file as variable length to save disk space.
+        
+        fits = fitsio.FITS(filename,vstorage='object')
+        # OR
+        data = fits[1].read(vstorage='object')
+        print(data['dvarr'].dtype)
+            dtype('object')
+        
+        
+        # you can grab a FITS HDU object to simplify notation
+        hdu1 = fits[1]
+        data = hdu1['x','y'][35:50]
+        
+        # get rows that satisfy the input expression.  See "Row Filtering
+        # Specification" in the cfitsio manual (note no temporary table is
+        # created in this case, contrary to the cfitsio docs)
+        w=fits[1].where("x > 0.25 && y < 35.0")
+        data = fits[1][w]
+        
+        # read the header
+        h = fits[0].read_header()
+        print(h['BITPIX'])
+            -64
+        
+        fits.close()
+        
+        
+        # now write some data
+        fits = FITS('test.fits','rw')
+        
+        
+        # create a rec array.  Note vstr
+        # is a variable length string
+        nrows=35
+        data = np.zeros(nrows, dtype=[('index','i4'),('vstr','O'),('x','f8'),
+                                      ('arr','f4',(3,4))])
+        data['index'] = np.arange(nrows,dtype='i4')
+        data['x'] = np.random.random(nrows)
+        data['vstr'] = [str(i) for i in range(nrows)]
+        data['arr'] = np.arange(nrows*3*4,dtype='f4').reshape(nrows,3,4)
+        
+        # create a new table extension and write the data
+        fits.write(data)
+        
+        # can also be a list of ordinary arrays if you send the names
+        array_list=[xarray,yarray,namearray]
+        names=['x','y','name']
+        fits.write(array_list, names=names)
+        
+        # similarly a dict of arrays
+        fits.write(dict_of_arrays)
+        fits.write(dict_of_arrays, names=names) # control name order
+        
+        # append more rows to the table.  The fields in data2 should match columns
+        # in the table.  missing columns will be filled with zeros
+        fits[-1].append(data2)
+        
+        # insert a new column into a table
+        fits[-1].insert_column('newcol', data)
+        
+        # insert with a specific colnum
+        fits[-1].insert_column('newcol', data, colnum=2)
+        
+        # overwrite rows
+        fits[-1].write(data)
+        
+        # overwrite starting at a particular row. The table will grow if needed
+        fits[-1].write(data, firstrow=350)
+        
+        
+        # create an image
+        img=np.arange(2*3,dtype='i4').reshape(2,3)
+        
+        # write an image in a new HDU (if this is a new file, the primary HDU)
+        fits.write(img)
+        
+        # write an image with rice compression
+        fits.write(img, compress='rice')
+        
+        # control the compression
+        fimg=np.random.normal(size=2*3).reshape(2, 3)
+        fits.write(fimg, compress='rice', qlevel=16, qmethod='SUBTRACTIVE_DITHER_2')
+        
+        # lossless gzip compression for integers or floating point
+        fits.write(img, compress='gzip', qlevel=None)
+        fits.write(fimg, compress='gzip', qlevel=None)
+        
+        # overwrite the image
+        fits[ext].write(img2)
+        
+        # write into an existing image, starting at the location [300,400]
+        # the image will be expanded if needed
+        fits[ext].write(img3, start=[300,400])
+        
+        # change the shape of the image on disk
+        fits[ext].reshape([250,100])
+        
+        # add checksums for the data
+        fits[-1].write_checksum()
+        
+        # can later verify data integrity
+        fits[-1].verify_checksum()
+        
+        # you can also write a header at the same time.  The header can be
+        #   - a simple dict (no comments)
+        #   - a list of dicts with 'name','value','comment' fields
+        #   - a FITSHDR object
+        
+        hdict = {'somekey': 35, 'location': 'kitt peak'}
+        fits.write(data, header=hdict)
+        hlist = [{'name':'observer', 'value':'ES', 'comment':'who'},
+                 {'name':'location','value':'CTIO'},
+                 {'name':'photometric','value':True}]
+        fits.write(data, header=hlist)
+        hdr=FITSHDR(hlist)
+        fits.write(data, header=hdr)
+        
+        # you can add individual keys to an existing HDU
+        fits[1].write_key(name, value, comment="my comment")
+        
+        # Write multiple header keys to an existing HDU. Here records
+        # is the same as sent with header= above
+        fits[1].write_keys(records)
+        
+        # write special COMMENT fields
+        fits[1].write_comment("observer JS")
+        fits[1].write_comment("we had good weather")
+        
+        # write special history fields
+        fits[1].write_history("processed with software X")
+        fits[1].write_history("re-processed with software Y")
+        
+        fits.close()
+        
+        # using a context, the file is closed automatically after leaving the block
+        with FITS('path/to/file') as fits:
+            data = fits[ext].read()
+        
+            # you can check whether an extension exists using "in":
+            if 'blah' in fits:
+                data=fits['blah'].read()
+            if 2 in fits:
+                data=fits[2].read()
+        
+        # methods to get more information about an extension.  For extension 1:
+        fits[1].get_info()             # lots of info about the extension
+        fits[1].has_data()             # returns True if data is present in extension
+        fits[1].get_extname()
+        fits[1].get_extver()
+        fits[1].get_extnum()           # return zero-offset extension number
+        fits[1].get_exttype()          # 'BINARY_TBL' or 'ASCII_TBL' or 'IMAGE_HDU'
+        fits[1].get_offsets()          # byte offsets (header_start, data_start, data_end)
+        fits[1].is_compressed()        # for images; True if tile-compressed
+        fits[1].get_colnames()         # for tables
+        fits[1].get_colname(colnum)    # for tables, find the name from column number
+        fits[1].get_nrows()            # for tables
+        fits[1].get_rec_dtype()        # for tables
+        fits[1].get_rec_column_descr() # for tables
+        fits[1].get_vstorage()         # for tables, storage mechanism for variable
+                                       # length columns
+
+        # public attributes you are free to change as needed
+        fits[1].lower           # if True, lower case colnames on output
+        fits[1].upper           # if True, upper case colnames on output
+        fits[1].case_sensitive  # if True, names are matched case sensitively
+        ```
+        
+        
+        ## Installation
+        
+        The easiest way is using pip or conda. To get the latest release
+        
+            pip install fitsio
+        
+            # update fitsio (and everything else)
+            pip install fitsio --upgrade
+        
+            # if pip refuses to update to a newer version
+            pip install fitsio --upgrade --ignore-installed
+        
+            # if you only want to upgrade fitsio
+            pip install fitsio --no-deps --upgrade --ignore-installed
+        
+            # for conda, use conda-forge
+            conda install -c conda-forge fitsio
+        
+        You can also get the latest source tarball release from
+        
+            https://pypi.python.org/pypi/fitsio
+        
+        or the bleeding-edge source from GitHub using git. To check out
+        the code for the first time
+        
+            git clone https://github.com/esheldon/fitsio.git
+        
+        Or, at a later time, to update to the latest
+
+            cd fitsio
+            git pull
+
+        If you downloaded the source tarball instead, use `tar xvfz` to unpack
+        it, enter the fitsio directory and type
+        
+            python setup.py install
+        
+        optionally with a prefix
+        
+            python setup.py install --prefix=/some/path
+        
+        ## Requirements
+        
+        - python 2 or python 3
+        - a C compiler and build tools like `make`, `patch`, etc.
+        - numpy (See the note below. Generally, numpy 1.11 or later is better.)
+        
+        
+        ### Do not use numpy 1.10.0 or 1.10.1
+        
+        There is a serious performance regression in numpy 1.10 that results
+        in fitsio running tens to hundreds of times slower.  A fix may be
+        forthcoming in a later release.  Please comment here if this
+        has already impacted your work https://github.com/numpy/numpy/issues/6467
+        
+        
+        ## Tests
+        
+        For a fully supported build, all of the unit tests should pass.
+        
+        ```bash
+        python -c "import fitsio; fitsio.test.test()"
+        ```
+        
+        Some tests may fail if certain libraries, such as bzip2, are not
+        available.  Such a failure only means that bzipped files cannot be
+        read; other functionality is unaffected.
+        
+        ## Notes on Usage and Features
+        
+        ### cfitsio bundling
+        
+        We bundle cfitsio partly because many deployed versions of cfitsio in the
+        wild do not have support for interesting features like tiled image compression.
+        Bundling a version that meets our needs is a safe alternative.
+        
+        ### array ordering
+        
+        Since numpy uses C order while FITS uses Fortran order, we write the
+        TDIM and image dimensions in reverse order, but write the data as is.
+        Conversely, we reverse the dims read from the header when creating
+        the numpy dtype, but read the data as is.
+        
+        ### `distutils` vs `setuptools`
+        
+        As of version `1.0.0`, `fitsio` has been transitioned to `setuptools` for packaging
+        and installation. There are many reasons to do this (and to not do this). However,
+        at a practical level, what this means for you is that you may have trouble uninstalling
+        older versions with `pip` via `pip uninstall fitsio`. If so, the best
+        thing to do is to remove the files manually. See this [stackoverflow question](https://stackoverflow.com/questions/402359/how-do-you-uninstall-a-python-package-that-was-installed-using-distutils)
+        for example.
+        
+        ### python 3 strings
+        
+        As of version `1.0.0`, fitsio supports Python 3 strings natively. This
+        means that under Python 3, native strings are correctly read from and
+        written to FITS files, and all byte string columns are treated as
+        ASCII-encoded unicode strings as well. For FITS files written with a
+        previous version of fitsio, the data in Python 3 will now come back as
+        a string and not a byte string. Note that this
+        support is not the same as full unicode support. Internally, fitsio only supports
+        the ASCII character set.
+        
+        ## TODO
+        
+        - HDU groups: does anyone use these? If so, open an issue!
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Topic :: Scientific/Engineering :: Astronomy
+Classifier: Intended Audience :: Science/Research
+Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
diff --git a/fitsio.egg-info/SOURCES.txt b/fitsio.egg-info/SOURCES.txt
new file mode 100644 (file)
index 0000000..2fa5e79
--- /dev/null
@@ -0,0 +1,201 @@
+.gitignore
+.travis.yml
+CHANGES.md
+LICENSE.txt
+MANIFEST.in
+README.md
+setup.py
+cfitsio3490/CMakeLists.txt
+cfitsio3490/FindPthreads.cmake
+cfitsio3490/License.txt
+cfitsio3490/Makefile.in
+cfitsio3490/README
+cfitsio3490/README.MacOS
+cfitsio3490/README.win
+cfitsio3490/README_OLD.win
+cfitsio3490/buffers.c
+cfitsio3490/cfileio.c
+cfitsio3490/cfitsio.pc.cmake
+cfitsio3490/cfitsio.pc.in
+cfitsio3490/cfitsio_mac.sit.hqx
+cfitsio3490/cfortran.h
+cfitsio3490/checksum.c
+cfitsio3490/config.guess
+cfitsio3490/config.sub
+cfitsio3490/configure
+cfitsio3490/configure.in
+cfitsio3490/cookbook.c
+cfitsio3490/cookbook.f
+cfitsio3490/drvrfile.c
+cfitsio3490/drvrgsiftp.c
+cfitsio3490/drvrgsiftp.h
+cfitsio3490/drvrmem.c
+cfitsio3490/drvrnet.c
+cfitsio3490/drvrsmem.c
+cfitsio3490/drvrsmem.h
+cfitsio3490/editcol.c
+cfitsio3490/edithdu.c
+cfitsio3490/eval.l
+cfitsio3490/eval.y
+cfitsio3490/eval_defs.h
+cfitsio3490/eval_f.c
+cfitsio3490/eval_l.c
+cfitsio3490/eval_tab.h
+cfitsio3490/eval_y.c
+cfitsio3490/f77.inc
+cfitsio3490/f77_wrap.h
+cfitsio3490/f77_wrap1.c
+cfitsio3490/f77_wrap2.c
+cfitsio3490/f77_wrap3.c
+cfitsio3490/f77_wrap4.c
+cfitsio3490/fits_hcompress.c
+cfitsio3490/fits_hdecompress.c
+cfitsio3490/fitscopy.c
+cfitsio3490/fitscore.c
+cfitsio3490/fitsio.h
+cfitsio3490/fitsio2.h
+cfitsio3490/fpack.c
+cfitsio3490/fpack.h
+cfitsio3490/fpackutil.c
+cfitsio3490/funpack.c
+cfitsio3490/getcol.c
+cfitsio3490/getcolb.c
+cfitsio3490/getcold.c
+cfitsio3490/getcole.c
+cfitsio3490/getcoli.c
+cfitsio3490/getcolj.c
+cfitsio3490/getcolk.c
+cfitsio3490/getcoll.c
+cfitsio3490/getcols.c
+cfitsio3490/getcolsb.c
+cfitsio3490/getcolui.c
+cfitsio3490/getcoluj.c
+cfitsio3490/getcoluk.c
+cfitsio3490/getkey.c
+cfitsio3490/group.c
+cfitsio3490/group.h
+cfitsio3490/grparser.c
+cfitsio3490/grparser.h
+cfitsio3490/histo.c
+cfitsio3490/imcompress.c
+cfitsio3490/imcopy.c
+cfitsio3490/install-sh
+cfitsio3490/iraffits.c
+cfitsio3490/iter_a.c
+cfitsio3490/iter_a.f
+cfitsio3490/iter_a.fit
+cfitsio3490/iter_b.c
+cfitsio3490/iter_b.f
+cfitsio3490/iter_b.fit
+cfitsio3490/iter_c.c
+cfitsio3490/iter_c.f
+cfitsio3490/iter_c.fit
+cfitsio3490/iter_image.c
+cfitsio3490/iter_var.c
+cfitsio3490/longnam.h
+cfitsio3490/makefile.bc
+cfitsio3490/makefile.vcc
+cfitsio3490/makepc.bat
+cfitsio3490/modkey.c
+cfitsio3490/pliocomp.c
+cfitsio3490/putcol.c
+cfitsio3490/putcolb.c
+cfitsio3490/putcold.c
+cfitsio3490/putcole.c
+cfitsio3490/putcoli.c
+cfitsio3490/putcolj.c
+cfitsio3490/putcolk.c
+cfitsio3490/putcoll.c
+cfitsio3490/putcols.c
+cfitsio3490/putcolsb.c
+cfitsio3490/putcolu.c
+cfitsio3490/putcolui.c
+cfitsio3490/putcoluj.c
+cfitsio3490/putcoluk.c
+cfitsio3490/putkey.c
+cfitsio3490/quantize.c
+cfitsio3490/region.c
+cfitsio3490/region.h
+cfitsio3490/ricecomp.c
+cfitsio3490/sample.tpl
+cfitsio3490/scalnull.c
+cfitsio3490/simplerng.c
+cfitsio3490/simplerng.h
+cfitsio3490/smem.c
+cfitsio3490/speed.c
+cfitsio3490/swapproc.c
+cfitsio3490/testf77.f
+cfitsio3490/testf77.out
+cfitsio3490/testf77.std
+cfitsio3490/testprog.c
+cfitsio3490/testprog.out
+cfitsio3490/testprog.std
+cfitsio3490/testprog.tpt
+cfitsio3490/vmsieee.c
+cfitsio3490/wcssub.c
+cfitsio3490/wcsutil.c
+cfitsio3490/winDumpExts.mak
+cfitsio3490/windumpexts.c
+cfitsio3490/cfitsio.xcodeproj/project.pbxproj
+cfitsio3490/docs/cfitsio.pdf
+cfitsio3490/docs/cfitsio.ps
+cfitsio3490/docs/cfitsio.tex
+cfitsio3490/docs/cfitsio.toc
+cfitsio3490/docs/cfortran.doc
+cfitsio3490/docs/changes.txt
+cfitsio3490/docs/fitsio.doc
+cfitsio3490/docs/fitsio.pdf
+cfitsio3490/docs/fitsio.ps
+cfitsio3490/docs/fitsio.tex
+cfitsio3490/docs/fitsio.toc
+cfitsio3490/docs/fpackguide.pdf
+cfitsio3490/docs/quick.pdf
+cfitsio3490/docs/quick.ps
+cfitsio3490/docs/quick.tex
+cfitsio3490/docs/quick.toc
+cfitsio3490/zlib/adler32.c
+cfitsio3490/zlib/crc32.c
+cfitsio3490/zlib/crc32.h
+cfitsio3490/zlib/deflate.c
+cfitsio3490/zlib/deflate.h
+cfitsio3490/zlib/infback.c
+cfitsio3490/zlib/inffast.c
+cfitsio3490/zlib/inffast.h
+cfitsio3490/zlib/inffixed.h
+cfitsio3490/zlib/inflate.c
+cfitsio3490/zlib/inflate.h
+cfitsio3490/zlib/inftrees.c
+cfitsio3490/zlib/inftrees.h
+cfitsio3490/zlib/trees.c
+cfitsio3490/zlib/trees.h
+cfitsio3490/zlib/uncompr.c
+cfitsio3490/zlib/zcompress.c
+cfitsio3490/zlib/zconf.h
+cfitsio3490/zlib/zlib.h
+cfitsio3490/zlib/zuncompress.c
+cfitsio3490/zlib/zutil.c
+cfitsio3490/zlib/zutil.h
+fitsio/__init__.py
+fitsio/fitsio_pywrap.c
+fitsio/fitslib.py
+fitsio/header.py
+fitsio/test.py
+fitsio/util.py
+fitsio.egg-info/PKG-INFO
+fitsio.egg-info/SOURCES.txt
+fitsio.egg-info/dependency_links.txt
+fitsio.egg-info/requires.txt
+fitsio.egg-info/top_level.txt
+fitsio/hdu/__init__.py
+fitsio/hdu/base.py
+fitsio/hdu/image.py
+fitsio/hdu/table.py
+fitsio/test_images/test_gzip_compressed_image.fits.fz
+patches/README.md
+patches/build_cfitsio_patches.py
+patches/configure.in.patch
+patches/configure.patch
+patches/drvrnet.c.patch
+patches/fitscore.c.patch
+patches/fitsio.h.patch
+patches/putcols.c.patch
\ No newline at end of file
diff --git a/fitsio.egg-info/dependency_links.txt b/fitsio.egg-info/dependency_links.txt
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/fitsio.egg-info/requires.txt b/fitsio.egg-info/requires.txt
new file mode 100644 (file)
index 0000000..24ce15a
--- /dev/null
@@ -0,0 +1 @@
+numpy
diff --git a/fitsio.egg-info/top_level.txt b/fitsio.egg-info/top_level.txt
new file mode 100644 (file)
index 0000000..78387cd
--- /dev/null
@@ -0,0 +1 @@
+fitsio
diff --git a/fitsio/__init__.py b/fitsio/__init__.py
new file mode 100644 (file)
index 0000000..2792165
--- /dev/null
@@ -0,0 +1,39 @@
+# flake8: noqa
+"""
+A python library to read and write data to FITS files using cfitsio.
+See the docs at https://github.com/esheldon/fitsio for example
+usage.
+"""
+
+__version__ = '1.1.4'
+
+from . import fitslib
+
+from .fitslib import (
+    FITS,
+    read,
+    read_header,
+    read_scamp_head,
+    write,
+    READONLY,
+    READWRITE,
+
+    NOCOMPRESS,
+    RICE_1,
+    GZIP_1,
+    GZIP_2,
+    PLIO_1,
+    HCOMPRESS_1,
+
+    NO_DITHER,
+    SUBTRACTIVE_DITHER_1,
+    SUBTRACTIVE_DITHER_2,
+)
+
+from .header import FITSHDR, FITSRecord, FITSCard
+from .hdu import BINARY_TBL, ASCII_TBL, IMAGE_HDU
+
+from . import util
+from .util import cfitsio_version, FITSRuntimeWarning
+
+from . import test
diff --git a/fitsio/fitsio_pywrap.c b/fitsio/fitsio_pywrap.c
new file mode 100644 (file)
index 0000000..6680a7e
--- /dev/null
@@ -0,0 +1,4859 @@
+/*
+ * fitsio_pywrap.c
+ *
+ * This is a CPython wrapper for the cfitsio library.
+
+  Copyright (C) 2011  Erin Sheldon, BNL.  erin dot sheldon at gmail dot com
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+*/
+
+#include <string.h>
+#include <Python.h>
+#include "fitsio.h"
+#include "fitsio2.h"
+//#include "fitsio_pywrap_lists.h"
+#include <numpy/arrayobject.h> 
+
+
+// this is not defined anywhere in cfitsio except in
+// the fits file structure
+#define CFITSIO_MAX_ARRAY_DIMS 99
+
+// not sure where this is defined in numpy...
+#define NUMPY_MAX_DIMS 32
+
+struct PyFITSObject {
+    PyObject_HEAD
+    fitsfile* fits;
+};
+
+#ifdef FITSIO_PYWRAP_ALWAYS_NONSTANDARD_STRINGS
+static int fits_use_standard_strings(void)
+{
+    return 0;
+}
+#endif
+
+
+// check str/bytes for python3, unicode/str for python2
+int is_python_string(const PyObject* obj)
+{
+#if PY_MAJOR_VERSION >= 3
+    return PyUnicode_Check(obj) || PyBytes_Check(obj);
+#else
+    return PyUnicode_Check(obj) || PyString_Check(obj);
+#endif
+}
+
+
+/*
+   Ensure all elements of the null-terminated string are ASCII, replacing
+   non-ASCII characters with a '?'
+*/
+
+static void convert_to_ascii(char* str) {
+    size_t size=0, i=0;
+    int cval=0;
+
+    size = strlen(str);
+    for (i=0; i < size; i++) {
+        cval = (int)str[i];
+        if (cval < 0 || cval > 127) {
+            str[i] = '?';
+        }
+    }
+}
+
+/*
+   Replace bad keyword characters with valid keyword ascii characters,
+   namely A-Z,a-z,0-9,_,-
+
+   To make it clear what has happened, the first four characters will be
+   replaced with J U N K and later bad characters with underscore.
+
+   Does not check the keyword is otherwise valid
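+
+   For example (illustrative): "@#$%-KEY!" becomes "JUNK-KEY_"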
+*/
+static void convert_keyword_to_allowed_ascii(char* str) {
+    int isgood=0;
+    size_t size=0, i=0;
+    int cval=0;
+
+    size = strlen(str);
+    for (i=0; i < size; i++) {
+        cval = (int)str[i];
+
+        isgood = 
+            (cval >= 'A' && cval <= 'Z')
+            ||
+            (cval >= 'a' && cval <= 'z')
+            ||
+            (cval >= '0' && cval <= '9')
+            ||
+            (cval == '-')
+            ||
+            (cval == '_');
+
+
+        if (!isgood) {
+            if (i==0) {
+                str[i] = 'J';
+            } else if (i==1) {
+                str[i] = 'U';
+            } else if (i==2) {
+                str[i] = 'N';
+            } else if (i==3) {
+                str[i] = 'K';
+            } else {
+                str[i] = '_';
+            }
+        }
+    }
+}
+
+
+/*
+   Get a string version of the object. New memory
+   is allocated and the caller must free it.
+*/
+
+// unicode is common to python 2 and 3
+static char* get_unicode_as_string(PyObject* obj)
+{
+    PyObject* tmp=NULL;
+    char* strdata=NULL;
+    tmp = PyObject_CallMethod(obj,"encode",NULL);
+
+    strdata = strdup( PyBytes_AsString(tmp) );
+    Py_XDECREF(tmp);
+
+    return strdata;
+}
+
+static char* get_object_as_string(PyObject* obj)
+{
+    PyObject* format=NULL;
+    PyObject* args=NULL;
+    char* strdata=NULL;
+    PyObject* tmpobj1=NULL;
+
+    if (PyUnicode_Check(obj)) {
+
+        strdata=get_unicode_as_string(obj);
+
+    } else {
+
+#if PY_MAJOR_VERSION >= 3
+
+        if (PyBytes_Check(obj)) {
+            strdata = strdup( PyBytes_AsString(obj) );
+        } else {
+            PyObject* tmpobj2=NULL;
+            format = Py_BuildValue("s","%s");
+            // this is not a string object
+            args=PyTuple_New(1);
+
+            // PyTuple_SetItem steals a reference; INCREF since obj is borrowed
+            Py_INCREF(obj);
+            PyTuple_SetItem(args,0,obj);
+            tmpobj2 = PyUnicode_Format(format, args);
+            tmpobj1 = PyObject_CallMethod(tmpobj2,"encode",NULL);
+
+            Py_XDECREF(args);
+            Py_XDECREF(tmpobj2);
+
+            strdata = strdup( PyBytes_AsString(tmpobj1) );
+            Py_XDECREF(tmpobj1);
+            Py_XDECREF(format);
+        }
+
+#else
+        // convert to a string as needed
+        if (PyString_Check(obj)) {
+            strdata = strdup( PyString_AsString(obj) );
+        } else {
+            format = Py_BuildValue("s","%s");
+            args=PyTuple_New(1);
+
+            // PyTuple_SetItem steals a reference; INCREF since obj is borrowed
+            Py_INCREF(obj);
+            PyTuple_SetItem(args,0,obj);
+            tmpobj1= PyString_Format(format, args);
+
+            strdata = strdup( PyString_AsString(tmpobj1) );
+            Py_XDECREF(args);
+            Py_XDECREF(tmpobj1);
+            Py_XDECREF(format);
+        }
+#endif
+    }
+
+    return strdata;
+}
+
+static void 
+set_ioerr_string_from_status(int status) {
+    char status_str[FLEN_STATUS], errmsg[FLEN_ERRMSG];
+    char message[1024];
+
+    int nleft=1024;
+
+    if (status) {
+        fits_get_errstatus(status, status_str);  /* get the error description */
+
+        sprintf(message, "FITSIO status = %d: %s\n", status, status_str);
+
+        // subtract the full prefix length, not just the status string
+        nleft -= strlen(message);
+
+        while ( nleft > 0 && fits_read_errmsg(errmsg) )  { /* get error stack messages */
+            strncat(message, errmsg, nleft-1);
+            nleft -= strlen(errmsg)+1;
+            if (nleft >= 2) {
+                strncat(message, "\n", nleft-1);
+            }
+            nleft-=2;
+        }
+        PyErr_SetString(PyExc_IOError, message);
+    }
+    return;
+}
+
+/*
+   string list helper functions
+*/
+
+struct stringlist {
+    size_t size;
+    char** data;
+};
+
+static struct stringlist* stringlist_new(void) {
+    struct stringlist* slist=NULL;
+
+    slist = malloc(sizeof(struct stringlist));
+    slist->size = 0;
+    slist->data=NULL;
+    return slist;
+}
+// push a copy of the string onto the string list
+static void stringlist_push(struct stringlist* slist, const char* str) {
+    size_t newsize=0;
+    size_t i=0;
+
+    newsize = slist->size+1;
+    slist->data = realloc(slist->data, sizeof(char*)*newsize);
+    slist->size += 1;
+
+    i = slist->size-1;
+
+    slist->data[i] = strdup(str);
+}
+
+static void stringlist_push_size(struct stringlist* slist, size_t slen) {
+    size_t newsize=0;
+    size_t i=0;
+
+    newsize = slist->size+1;
+    slist->data = realloc(slist->data, sizeof(char*)*newsize);
+    slist->size += 1;
+
+    i = slist->size-1;
+
+    slist->data[i] = calloc(slen+1,sizeof(char));
+    //slist->data[i] = malloc(sizeof(char)*(slen+1));
+    //memset(slist->data[i], 0, slen+1);
+}
+static struct stringlist* stringlist_delete(struct stringlist* slist) {
+    if (slist != NULL) {
+        size_t i=0;
+        if (slist->data != NULL) {
+            for (i=0; i < slist->size; i++) {
+                free(slist->data[i]);
+            }
+        }
+        free(slist->data);
+        free(slist);
+    }
+    return NULL;
+}
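+
+/*
+   typical lifecycle of a stringlist (an illustrative sketch):
+
+       struct stringlist* names=stringlist_new();
+       stringlist_push(names, "TTYPE1");   // pushes a copy of the string
+       stringlist_push_size(names, 70);    // pushes an empty, zeroed slot
+       // ... use names->data as a char** ...
+       names=stringlist_delete(names);     // frees entries, returns NULL
+*/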
+
+
+/*
+static void stringlist_print(struct stringlist* slist) {
+    size_t i=0;
+    if (slist == NULL) {
+        return;
+    }
+    for (i=0; i<slist->size; i++) {
+        printf("  slist[%ld]: %s\n", i, slist->data[i]);
+    }
+}
+*/
+
+
+static int stringlist_addfrom_listobj(struct stringlist* slist, 
+                                      PyObject* listObj, 
+                                      const char* listname) {
+    size_t size=0, i=0;
+    char* tmpstr=NULL;
+
+    if (!PyList_Check(listObj)) {
+        PyErr_Format(PyExc_ValueError, "Expected a list for %s.", listname);
+        return 1;
+    }
+    size = PyList_Size(listObj);
+
+    for (i=0; i<size; i++) {
+        PyObject* tmp = PyList_GetItem(listObj, i);
+        if (!is_python_string(tmp)) {
+            PyErr_Format(PyExc_ValueError, 
+                         "Expected only strings in %s list.", listname);
+            return 1;
+        }
+        tmpstr = get_object_as_string(tmp);
+        stringlist_push(slist, tmpstr);
+        free(tmpstr);
+    }
+    return 0;
+}
+
+static
+void add_double_to_dict(PyObject* dict, const char* key, double value) {
+    PyObject* tobj=NULL;
+    tobj=PyFloat_FromDouble(value);
+    PyDict_SetItemString(dict, key, tobj);
+    Py_XDECREF(tobj);
+}
+
+static
+void add_long_to_dict(PyObject* dict, const char* key, long value) {
+    PyObject* tobj=NULL;
+    tobj=PyLong_FromLong(value);
+    PyDict_SetItemString(dict, key, tobj);
+    Py_XDECREF(tobj);
+}
+
+static
+void add_long_long_to_dict(PyObject* dict, const char* key, long long value) {
+    PyObject* tobj=NULL;
+    tobj=PyLong_FromLongLong(value);
+    PyDict_SetItemString(dict, key, tobj);
+    Py_XDECREF(tobj);
+}
+
+static
+void add_string_to_dict(PyObject* dict, const char* key, const char* str) {
+    PyObject* tobj=NULL;
+    tobj=Py_BuildValue("s",str);
+    PyDict_SetItemString(dict, key, tobj);
+    Py_XDECREF(tobj);
+}
+
+static
+void add_none_to_dict(PyObject* dict, const char* key) {
+    PyDict_SetItemString(dict, key, Py_None);
+    Py_XINCREF(Py_None);
+}
+static
+void add_true_to_dict(PyObject* dict, const char* key) {
+    PyDict_SetItemString(dict, key, Py_True);
+    Py_XINCREF(Py_True);
+}
+static
+void add_false_to_dict(PyObject* dict, const char* key) {
+    PyDict_SetItemString(dict, key, Py_False);
+    Py_XINCREF(Py_False);
+}
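+
+// note: PyDict_SetItemString does not steal references, so the temporary
+// objects created in the helpers above are released with Py_XDECREF after
+// they are inserted into the dict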
+
+
+/*
+static
+void append_long_to_list(PyObject* list, long value) {
+    PyObject* tobj=NULL;
+    tobj=PyLong_FromLong(value);
+    PyList_Append(list, tobj);
+    Py_XDECREF(tobj);
+}
+*/
+
+static
+void append_long_long_to_list(PyObject* list, long long value) {
+    PyObject* tobj=NULL;
+    tobj=PyLong_FromLongLong(value);
+    PyList_Append(list, tobj);
+    Py_XDECREF(tobj);
+}
+
+/*
+static
+void append_string_to_list(PyObject* list, const char* str) {
+    PyObject* tobj=NULL;
+    tobj=Py_BuildValue("s",str);
+    PyList_Append(list, tobj);
+    Py_XDECREF(tobj);
+}
+*/
+
+
+
+static int
+PyFITSObject_init(struct PyFITSObject* self, PyObject *args, PyObject *kwds)
+{
+    char* filename;
+    int mode;
+    int status=0;
+    int create=0;
+
+    if (!PyArg_ParseTuple(args, (char*)"sii", &filename, &mode, &create)) {
+        return -1;
+    }
+
+    if (create) {
+        // create and open
+        if (fits_create_file(&self->fits, filename, &status)) {
+            set_ioerr_string_from_status(status);
+            return -1;
+        }
+    } else {
+        if (fits_open_file(&self->fits, filename, mode, &status)) {
+            set_ioerr_string_from_status(status);
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+
+static PyObject *
+PyFITSObject_repr(struct PyFITSObject* self) {
+
+    if (self->fits != NULL) {
+        int status=0;
+        char filename[FLEN_FILENAME];
+        char repr[2056];
+
+        if (fits_file_name(self->fits, filename, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+
+        sprintf(repr, "fits file: %s", filename);
+        return Py_BuildValue("s",repr);
+    }  else {
+        return Py_BuildValue("s","none");
+    }
+}
+
+static PyObject *
+PyFITSObject_filename(struct PyFITSObject* self) {
+
+    if (self->fits != NULL) {
+        int status=0;
+        char filename[FLEN_FILENAME];
+        PyObject* fnameObj=NULL;
+        if (fits_file_name(self->fits, filename, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+
+        fnameObj = Py_BuildValue("s",filename);
+        return fnameObj;
+    }  else {
+        PyErr_SetString(PyExc_ValueError, "file is not open, cannot determine name");
+        return NULL;
+    }
+}
+
+
+
+static PyObject *
+PyFITSObject_close(struct PyFITSObject* self)
+{
+    int status=0;
+    if (fits_close_file(self->fits, &status)) {
+        self->fits=NULL;
+        /*
+        set_ioerr_string_from_status(status);
+        return NULL;
+        */
+    }
+    self->fits=NULL;
+    Py_RETURN_NONE;
+}
+
+
+
+static void
+PyFITSObject_dealloc(struct PyFITSObject* self)
+{
+    int status=0;
+    fits_close_file(self->fits, &status);
+#if PY_MAJOR_VERSION >= 3
+    // introduced in python 2.6
+    Py_TYPE(self)->tp_free((PyObject*)self);
+#else
+    // old way, removed in python 3
+    self->ob_type->tp_free((PyObject*)self);
+#endif
+}
+
+
+// this will need to be updated for array string columns.
+// I'm using a tcolumn* here, could cause problems
+static long get_groupsize(tcolumn* colptr) {
+    long gsize=0;
+    if (colptr->tdatatype == TSTRING) {
+        //gsize = colptr->twidth;
+        gsize = colptr->trepeat;
+    } else {
+        gsize = colptr->twidth*colptr->trepeat;
+    }
+    return gsize;
+}
+static npy_int64* get_int64_from_array(PyObject* arr, npy_intp* ncols) {
+
+    npy_int64* colnums;
+    int npy_type=0, check=0;
+
+    if (!PyArray_Check(arr)) {
+        PyErr_SetString(PyExc_TypeError, "int64 array must be an array.");
+        return NULL;
+    }
+
+    npy_type = PyArray_TYPE(arr);
+
+    // on some platforms, creating an 'i8' array gives it a longlong
+    // dtype.  Just make sure it is 8 bytes
+    check=
+        (npy_type == NPY_INT64)
+        ||
+        (npy_type==NPY_LONGLONG && sizeof(npy_longlong)==sizeof(npy_int64));
+    if (!check) {
+        PyErr_Format(PyExc_TypeError,
+                     "array must be an int64 array (%d), got %d.",
+                     NPY_INT64,npy_type);
+        return NULL;
+    }
+    if (!PyArray_ISCONTIGUOUS(arr)) {
+        PyErr_SetString(PyExc_TypeError, "int64 array must be a contiguous.");
+        return NULL;
+    }
+
+    colnums = PyArray_DATA(arr);
+    *ncols = PyArray_SIZE(arr);
+
+    return colnums;
+}
+
+// move hdu by name and possibly version, return the hdu number
+static PyObject *
+PyFITSObject_movnam_hdu(struct PyFITSObject* self, PyObject* args) {
+    int   status=0;
+    int   hdutype=ANY_HDU; // means we don't care if it's an image or a table
+    char* extname=NULL;
+    int   extver=0;        // zero means it is ignored
+    int   hdunum=0;
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTuple(args, (char*)"isi", &hdutype, &extname, &extver)) {
+        return NULL;
+    }
+
+    if (fits_movnam_hdu(self->fits, hdutype, extname,  extver, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    
+    fits_get_hdu_num(self->fits, &hdunum);
+    return PyLong_FromLong((long)hdunum);
+}
+
+
+
+static PyObject *
+PyFITSObject_movabs_hdu(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0, hdutype=0;
+    int status=0;
+    PyObject* hdutypeObj=NULL;
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    hdutypeObj = PyLong_FromLong((long)hdutype);
+    return hdutypeObj;
+}
+
+// get info for the specified HDU
+static PyObject *
+PyFITSObject_get_hdu_info(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0, hdutype=0, ext=0, ignore_scaling=FALSE;
+    int status=0, tstatus=0, is_compressed=0;
+    PyObject* dict=NULL;
+
+    char extname[FLEN_VALUE];
+    char hduname[FLEN_VALUE];
+    int extver=0, hduver=0;
+
+    long long header_start;
+    long long data_start;
+    long long data_end;
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTuple(args, (char*)"ii", &hdunum, &ignore_scaling)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (ignore_scaling == TRUE
+        && fits_set_bscale(self->fits, 1.0, 0.0, &status)) {
+        // set the exception; otherwise NULL is returned with no error set
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    dict = PyDict_New();
+    ext=hdunum-1;
+
+    add_long_to_dict(dict, "hdunum", (long)hdunum);
+    add_long_to_dict(dict, "extnum", (long)ext);
+    add_long_to_dict(dict, "hdutype", (long)hdutype);
+
+
+    tstatus=0;
+    if (fits_read_key(self->fits, TSTRING, "EXTNAME", extname, NULL, &tstatus)==0) {
+        convert_keyword_to_allowed_ascii(extname);
+        add_string_to_dict(dict, "extname", extname);
+    } else {
+        add_string_to_dict(dict, "extname", "");
+    }
+
+    tstatus=0;
+    if (fits_read_key(self->fits, TSTRING, "HDUNAME", hduname, NULL, &tstatus)==0) {
+        convert_keyword_to_allowed_ascii(hduname);
+        add_string_to_dict(dict, "hduname", hduname);
+    } else {
+        add_string_to_dict(dict, "hduname", "");
+    }
+
+    tstatus=0;
+    if (fits_read_key(self->fits, TINT, "EXTVER", &extver, NULL, &tstatus)==0) {
+        add_long_to_dict(dict, "extver", (long)extver);
+    } else {
+        add_long_to_dict(dict, "extver", (long)0);
+    }
+
+    tstatus=0;
+    if (fits_read_key(self->fits, TINT, "HDUVER", &hduver, NULL, &tstatus)==0) {
+        add_long_to_dict(dict, "hduver", (long)hduver);
+    } else {
+        add_long_to_dict(dict, "hduver", (long)0);
+    }
+
+    tstatus=0;
+    is_compressed=fits_is_compressed_image(self->fits, &tstatus);
+    add_long_to_dict(dict, "is_compressed_image", (long)is_compressed);
+
+
+    // get byte offsets
+    tstatus=0;
+    if (0==fits_get_hduaddrll(self->fits, &header_start, &data_start, &data_end, &tstatus)) {
+        add_long_long_to_dict(dict, "header_start", (long long)header_start);
+        add_long_long_to_dict(dict, "data_start", (long long)data_start);
+        add_long_long_to_dict(dict, "data_end", (long long)data_end);
+    } else {
+        add_long_long_to_dict(dict, "header_start", -1);
+        add_long_long_to_dict(dict, "data_start", -1);
+        add_long_long_to_dict(dict, "data_end", -1);
+    }
+
+    int ndims=0;
+    int maxdim=CFITSIO_MAX_ARRAY_DIMS;
+    LONGLONG dims[CFITSIO_MAX_ARRAY_DIMS];
+    if (hdutype == IMAGE_HDU) {
+        // move this into its own function
+        int tstatus=0;
+        int bitpix=0;
+        int bitpix_equiv=0;
+        char comptype[20];
+        PyObject* dimsObj=PyList_New(0);
+        int i=0;
+
+        //if (fits_read_imghdrll(self->fits, maxdim, simple_p, &bitpix, &ndims,
+        //                       dims, pcount_p, gcount_p, extend_p, &status)) {
+        if (fits_get_img_paramll(self->fits, maxdim, &bitpix, &ndims, dims, &tstatus)) {
+            add_string_to_dict(dict,"error","could not determine image parameters");
+        } else {
+            add_long_to_dict(dict,"ndims",(long)ndims);
+            add_long_to_dict(dict,"img_type",(long)bitpix);
+
+            if (ignore_scaling == TRUE) {
+                // Get the raw type if scaling is being ignored.
+                fits_get_img_type(self->fits, &bitpix_equiv, &status);
+            } else {
+                fits_get_img_equivtype(self->fits, &bitpix_equiv, &status);
+            }
+
+            add_long_to_dict(dict,"img_equiv_type",(long)bitpix_equiv);
+
+            tstatus=0;
+            if (fits_read_key(self->fits, TSTRING, "ZCMPTYPE", 
+                              comptype, NULL, &tstatus)==0) {
+                convert_to_ascii(comptype);
+                add_string_to_dict(dict,"comptype",comptype);
+            } else {
+                add_none_to_dict(dict,"comptype");
+            }
+
+            for (i=0; i<ndims; i++) {
+                append_long_long_to_list(dimsObj, (long long)dims[i]);
+            }
+            PyDict_SetItemString(dict, "dims", dimsObj);
+            Py_XDECREF(dimsObj);
+
+        }
+
+    } else if (hdutype == BINARY_TBL) {
+        int tstatus=0;
+        LONGLONG nrows=0;
+        int ncols=0;
+        PyObject* colinfo = PyList_New(0);
+        int i=0,j=0;
+
+        fits_get_num_rowsll(self->fits, &nrows, &tstatus);
+        fits_get_num_cols(self->fits, &ncols, &tstatus);
+        add_long_long_to_dict(dict,"nrows",(long long)nrows);
+        add_long_to_dict(dict,"ncols",(long)ncols);
+
+        {
+            PyObject* d = NULL;
+            tcolumn* col=NULL;
+            struct stringlist* names=NULL;
+            struct stringlist* tforms=NULL;
+            names=stringlist_new();
+            tforms=stringlist_new();
+
+            for (i=0; i<ncols; i++) {
+                stringlist_push_size(names, 70);
+                stringlist_push_size(tforms, 70);
+            }
+            // just get the names: no other way to do it!
+            fits_read_btblhdrll(self->fits, ncols, NULL, NULL, 
+                                names->data, tforms->data, 
+                                NULL, NULL, NULL, &tstatus);
+
+            for (i=0; i<ncols; i++) {
+                d = PyDict_New();
+                int type=0;
+                LONGLONG repeat=0;
+                LONGLONG width=0;
+
+                convert_to_ascii(names->data[i]);
+                add_string_to_dict(d,"name",names->data[i]);
+                convert_to_ascii(tforms->data[i]);
+                add_string_to_dict(d,"tform",tforms->data[i]);
+
+                fits_get_coltypell(self->fits, i+1, &type, &repeat, &width, &tstatus);
+                add_long_to_dict(d,"type",(long)type);
+                add_long_long_to_dict(d,"repeat",(long long)repeat);
+                add_long_long_to_dict(d,"width",(long long)width);
+
+                fits_get_eqcoltypell(self->fits,i+1,&type,&repeat,&width, &tstatus);
+                add_long_to_dict(d,"eqtype",(long)type);
+
+                tstatus=0;
+                if (fits_read_tdimll(self->fits, i+1, maxdim, &ndims, dims, 
+                                     &tstatus)) {
+                    add_none_to_dict(d,"tdim");
+                } else {
+                    PyObject* dimsObj=PyList_New(0);
+                    for (j=0; j<ndims; j++) {
+                        append_long_long_to_list(dimsObj, (long long)dims[j]);
+                    }
+
+                    PyDict_SetItemString(d, "tdim", dimsObj);
+                    Py_XDECREF(dimsObj);
+                }
+
+                // using the struct, could cause problems
+                // actually, we can use ffgcprll to get this info, but will
+                // be redundant with some others above
+                col = &self->fits->Fptr->tableptr[i];
+                add_double_to_dict(d,"tscale",col->tscale);
+                add_double_to_dict(d,"tzero",col->tzero);
+
+                PyList_Append(colinfo, d);
+                Py_XDECREF(d);
+            }
+            names=stringlist_delete(names);
+            tforms=stringlist_delete(tforms);
+
+            PyDict_SetItemString(dict, "colinfo", colinfo);
+            Py_XDECREF(colinfo);
+        }
+    } else {
+        int tstatus=0;
+        LONGLONG nrows=0;
+        int ncols=0;
+        PyObject* colinfo = PyList_New(0);
+        int i=0,j=0;
+
+        fits_get_num_rowsll(self->fits, &nrows, &tstatus);
+        fits_get_num_cols(self->fits, &ncols, &tstatus);
+        add_long_long_to_dict(dict,"nrows",(long long)nrows);
+        add_long_to_dict(dict,"ncols",(long)ncols);
+
+        {
+            tcolumn* col=NULL;
+            struct stringlist* names=NULL;
+            struct stringlist* tforms=NULL;
+            names=stringlist_new();
+            tforms=stringlist_new();
+
+            for (i=0; i<ncols; i++) {
+                stringlist_push_size(names, 70);
+                stringlist_push_size(tforms, 70);
+            }
+            // just get the names: no other way to do it!
+
+            //                                        rowlen nrows
+            fits_read_atblhdrll(self->fits, ncols, NULL, NULL,
+            //          tfields             tbcol                units
+                        NULL,   names->data, NULL, tforms->data, NULL,
+            //          extname
+                        NULL, &tstatus);
+
+
+
+            for (i=0; i<ncols; i++) {
+                PyObject* d = PyDict_New();
+                int type=0;
+                LONGLONG repeat=0;
+                LONGLONG width=0;
+
+                convert_to_ascii(names->data[i]);
+                add_string_to_dict(d,"name",names->data[i]);
+                convert_to_ascii(tforms->data[i]);
+                add_string_to_dict(d,"tform",tforms->data[i]);
+
+                fits_get_coltypell(self->fits, i+1, &type, &repeat, &width, &tstatus);
+                add_long_to_dict(d,"type",(long)type);
+                add_long_long_to_dict(d,"repeat",(long long)repeat);
+                add_long_long_to_dict(d,"width",(long long)width);
+
+                fits_get_eqcoltypell(self->fits, i+1, &type, &repeat, &width, &tstatus);
+                add_long_to_dict(d,"eqtype",(long)type);
+
+                tstatus=0;
+                if (fits_read_tdimll(self->fits, i+1, maxdim, &ndims, dims, 
+                                                      &tstatus)) {
+                    add_none_to_dict(d,"tdim"); // the column dict, not the hdu dict
+                } else {
+                    PyObject* dimsObj=PyList_New(0);
+                    for (j=0; j<ndims; j++) {
+                        append_long_long_to_list(dimsObj, (long long)dims[j]);
+                    }
+
+                    PyDict_SetItemString(d, "tdim", dimsObj);
+                    Py_XDECREF(dimsObj);
+                }
+
+                // using the struct, could cause problems
+                // actually, we can use ffgcprll to get this info, but will
+                // be redundant with some others above
+                col = &self->fits->Fptr->tableptr[i];
+                add_double_to_dict(d,"tscale",col->tscale);
+                add_double_to_dict(d,"tzero",col->tzero);
+
+                PyList_Append(colinfo, d);
+                Py_XDECREF(d);
+            }
+            names=stringlist_delete(names);
+            tforms=stringlist_delete(tforms);
+
+            PyDict_SetItemString(dict, "colinfo", colinfo);
+            Py_XDECREF(colinfo);
+        }
+
+    }
+    return dict;
+}
+
+// get info for the specified HDU
+static PyObject *
+PyFITSObject_get_hdu_name_version(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0, hdutype=0;
+    int status=0;
+
+    char extname[FLEN_VALUE];
+    int extver=0;
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    status=0;
+    if (fits_read_key(self->fits, TINT, "EXTVER", &extver, NULL, &status)!=0) {
+        extver=0;
+    }
+
+    status=0;
+    if (fits_read_key(self->fits, TSTRING, "EXTNAME", extname, NULL, &status)==0) {
+        return Py_BuildValue("si", extname, extver);
+    } else {
+        return Py_BuildValue("si", "", extver);
+    }
+}
+
+
+// map a numpy dtype to the FITS table datatype passed to fits_write_col
+static int 
+npy_to_fits_table_type(int npy_dtype, int write_bitcols) {
+
+    char mess[255];
+    switch (npy_dtype) {
+        case NPY_BOOL:
+            if (write_bitcols) {
+                return TBIT;
+            } else {
+                return TLOGICAL;
+            }
+        case NPY_UINT8:
+            return TBYTE;
+        case NPY_INT8:
+            return TSBYTE;
+        case NPY_UINT16:
+            return TUSHORT;
+        case NPY_INT16:
+            return TSHORT;
+        case NPY_UINT32:
+            if (sizeof(unsigned int) == sizeof(npy_uint32)) {
+                return TUINT;
+            } else if (sizeof(unsigned long) == sizeof(npy_uint32)) {
+                return TULONG;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine 4 byte unsigned integer type");
+                return -9999;
+            }
+        case NPY_INT32:
+            if (sizeof(int) == sizeof(npy_int32)) {
+                return TINT;
+            } else if (sizeof(long) == sizeof(npy_int32)) {
+                return TLONG;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine 4 byte integer type");
+                return -9999;
+            }
+
+        case NPY_INT64:
+            if (sizeof(long long) == sizeof(npy_int64)) {
+                return TLONGLONG;
+            } else if (sizeof(long) == sizeof(npy_int64)) {
+                return TLONG;
+            } else if (sizeof(int) == sizeof(npy_int64)) {
+                return TINT;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine 8 byte integer type");
+                return -9999;
+            }
+
+
+        case NPY_FLOAT32:
+            return TFLOAT;
+        case NPY_FLOAT64:
+            return TDOUBLE;
+
+        case NPY_COMPLEX64:
+            return TCOMPLEX;
+        case NPY_COMPLEX128:
+            return TDBLCOMPLEX;
+
+        case NPY_STRING:
+            return TSTRING;
+
+        case NPY_UINT64:
+            PyErr_SetString(PyExc_TypeError, "Unsigned 8 byte integer images are not supported by the FITS standard");
+            return -9999;
+
+        default:
+            sprintf(mess,"Unsupported numpy table datatype %d", npy_dtype);
+            PyErr_SetString(PyExc_TypeError, mess);
+            return -9999;
+    }
+
+    return 0;
+}
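+
+// for example (illustrative): an 'f8' (NPY_FLOAT64) column maps to TDOUBLE,
+// and an NPY_BOOL column maps to TBIT when write_bitcols is set, TLOGICAL
+// otherwise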
+
+
+
+static int 
+npy_to_fits_image_types(int npy_dtype, int *fits_img_type, int *fits_datatype) {
+
+    char mess[255];
+    switch (npy_dtype) {
+        case NPY_UINT8:
+            *fits_img_type = BYTE_IMG;
+            *fits_datatype = TBYTE;
+            break;
+        case NPY_INT8:
+            *fits_img_type = SBYTE_IMG;
+            *fits_datatype = TSBYTE;
+            break;
+        case NPY_UINT16:
+            *fits_img_type = USHORT_IMG;
+            *fits_datatype = TUSHORT;
+            break;
+        case NPY_INT16:
+            *fits_img_type = SHORT_IMG;
+            *fits_datatype = TSHORT;
+            break;
+
+        case NPY_UINT32:
+            //*fits_img_type = ULONG_IMG;
+            if (sizeof(unsigned short) == sizeof(npy_uint32)) {
+                *fits_img_type = USHORT_IMG;
+                *fits_datatype = TUSHORT;
+            } else if (sizeof(unsigned int) == sizeof(npy_uint32)) {
+                // there is no UINT_IMG, so use ULONG_IMG
+                *fits_img_type = ULONG_IMG;
+                *fits_datatype = TUINT;
+            } else if (sizeof(unsigned long) == sizeof(npy_uint32)) {
+                *fits_img_type = ULONG_IMG;
+                *fits_datatype = TULONG;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine 4 byte unsigned integer type");
+                *fits_datatype = -9999;
+                return 1;
+            }
+            break;
+
+        case NPY_INT32:
+            //*fits_img_type = LONG_IMG;
+            if (sizeof(short) == sizeof(npy_int32)) {
+                *fits_img_type = SHORT_IMG;
+                *fits_datatype = TSHORT;
+            } else if (sizeof(int) == sizeof(npy_int32)) {
+                // there is no INT_IMG in FITS, so use LONG_IMG (32-bit)
+                *fits_img_type = LONG_IMG;
+                *fits_datatype = TINT;
+            } else if (sizeof(long) == sizeof(npy_int32)) {
+                *fits_img_type = LONG_IMG;
+                *fits_datatype = TLONG;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine 4 byte integer type");
+                *fits_datatype = -9999;
+                return 1;
+            }
+            break;
+
+        case NPY_INT64:
+            if (sizeof(LONGLONG) == sizeof(npy_int64)) {
+                *fits_img_type = LONGLONG_IMG;
+                *fits_datatype = TLONGLONG;
+            } else if (sizeof(long) == sizeof(npy_int64)) {
+                *fits_img_type = LONG_IMG;
+                *fits_datatype = TLONG;
+            } else if (sizeof(int) == sizeof(npy_int64)) {
+                // fall back to LONG_IMG with TINT when int is 8 bytes
+                *fits_img_type = LONG_IMG;
+                *fits_datatype = TINT;
+            } else if (sizeof(long long) == sizeof(npy_int64)) {
+                // we don't expect to get here
+                *fits_img_type = LONGLONG_IMG;
+                *fits_datatype = TLONGLONG;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine 8 byte integer type");
+                *fits_datatype = -9999;
+                return 1;
+            }
+            break;
+
+
+        case NPY_FLOAT32:
+            *fits_img_type = FLOAT_IMG;
+            *fits_datatype = TFLOAT;
+            break;
+        case NPY_FLOAT64:
+            *fits_img_type = DOUBLE_IMG;
+            *fits_datatype = TDOUBLE;
+            break;
+
+        case NPY_UINT64:
+            PyErr_SetString(PyExc_TypeError, "Unsigned 8 byte integer images are not supported by the FITS standard");
+            *fits_datatype = -9999;
+            return 1;
+            break;
+
+        default:
+            sprintf(mess,"Unsupported numpy image datatype %d", npy_dtype);
+            PyErr_SetString(PyExc_TypeError, mess);
+            *fits_datatype = -9999;
+            return 1;
+            break;
+    }
+
+    return 0;
+}
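+
+// for example (illustrative): an 'f4' (NPY_FLOAT32) image is created with
+// BITPIX=FLOAT_IMG and the buffer is passed to cfitsio as TFLOAT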
+
+
+/* 
+ * this is really only for reading variable length columns since we should be
+ * able to just read the bytes for normal columns
+ */
+static int fits_to_npy_table_type(int fits_dtype, int* isvariable) {
+
+    if (fits_dtype < 0) {
+        *isvariable=1;
+    } else {
+        *isvariable=0;
+    }
+
+    switch (abs(fits_dtype)) {
+        case TBIT:
+            return NPY_INT8;
+        case TLOGICAL: // literal T or F stored as char
+            return NPY_INT8;
+        case TBYTE:
+            return NPY_UINT8;
+        case TSBYTE:
+            return NPY_INT8;
+
+        case TUSHORT:
+            if (sizeof(unsigned short) == sizeof(npy_uint16)) {
+                return NPY_UINT16;
+            } else if (sizeof(unsigned short) == sizeof(npy_uint8)) {
+                return NPY_UINT8;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TUSHORT");
+                return -9999;
+            }
+        case TSHORT:
+            if (sizeof(short) == sizeof(npy_int16)) {
+                return NPY_INT16;
+            } else if (sizeof(short) == sizeof(npy_int8)) {
+                return NPY_INT8;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TSHORT");
+                return -9999;
+            }
+
+        case TUINT:
+            if (sizeof(unsigned int) == sizeof(npy_uint32)) {
+                return NPY_UINT32;
+            } else if (sizeof(unsigned int) == sizeof(npy_uint64)) {
+                return NPY_UINT64;
+            } else if (sizeof(unsigned int) == sizeof(npy_uint16)) {
+                return NPY_UINT16;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TUINT");
+                return -9999;
+            }
+        case TINT:
+            if (sizeof(int) == sizeof(npy_int32)) {
+                return NPY_INT32;
+            } else if (sizeof(int) == sizeof(npy_int64)) {
+                return NPY_INT64;
+            } else if (sizeof(int) == sizeof(npy_int16)) {
+                return NPY_INT16;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TINT");
+                return -9999;
+            }
+
+        case TULONG:
+            if (sizeof(unsigned long) == sizeof(npy_uint32)) {
+                return NPY_UINT32;
+            } else if (sizeof(unsigned long) == sizeof(npy_uint64)) {
+                return NPY_UINT64;
+            } else if (sizeof(unsigned long) == sizeof(npy_uint16)) {
+                return NPY_UINT16;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TULONG");
+                return -9999;
+            }
+        case TLONG:
+            if (sizeof(long) == sizeof(npy_int32)) {
+                return NPY_INT32;
+            } else if (sizeof(long) == sizeof(npy_int64)) {
+                return NPY_INT64;
+            } else if (sizeof(long) == sizeof(npy_int16)) {
+                return NPY_INT16;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TLONG");
+                return -9999;
+            }
+
+
+        case TLONGLONG:
+            if (sizeof(LONGLONG) == sizeof(npy_int64)) {
+                return NPY_INT64;
+            } else if (sizeof(LONGLONG) == sizeof(npy_int32)) {
+                return NPY_INT32;
+            } else if (sizeof(LONGLONG) == sizeof(npy_int16)) {
+                return NPY_INT16;
+            } else {
+                PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TLONGLONG");
+                return -9999;
+            }
+
+
+
+        case TFLOAT:
+            return NPY_FLOAT32;
+        case TDOUBLE:
+            return NPY_FLOAT64;
+
+        case TCOMPLEX:
+            return NPY_COMPLEX64;
+        case TDBLCOMPLEX:
+            return NPY_COMPLEX128;
+
+
+        case TSTRING:
+            return NPY_STRING;
+
+        default:
+            PyErr_Format(PyExc_TypeError,"Unsupported FITS table datatype %d", fits_dtype); 
+            return -9999;
+    }
+
+    return 0;
+}
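+
+// note: cfitsio reports variable-length ('P' descriptor) columns with a
+// negative type code, which is why the sign carries the isvariable flag above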
+
+
+
+static int create_empty_hdu(struct PyFITSObject* self)
+{
+    int status=0;
+    int bitpix=SHORT_IMG;
+    int naxis=0;
+    long* naxes=NULL;
+    if (fits_create_img(self->fits, bitpix, naxis, naxes, &status)) {
+        set_ioerr_string_from_status(status);
+        return 1;
+    }
+
+    return 0;
+}
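+
+// note: with naxis=0 this creates a header-only HDU; bitpix is a placeholder
+// since no data follow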
+
+
+// follows fits convention that return value is true
+// for failure
+//
+// exception strings are set internally
+//
+// length checking should happen in python
+//
+// note tile dims are written reverse order since
+// python orders C and fits orders Fortran
+static int set_compression(fitsfile *fits,
+                           int comptype,
+                           PyObject* tile_dims_obj,
+                           int *status) {
+
+    npy_int64 *tile_dims_py=NULL;
+    long *tile_dims_fits=NULL;
+    npy_intp ndims=0, i=0;
+
+    // can be NOCOMPRESS (0)
+    if (fits_set_compression_type(fits, comptype, status)) {
+        set_ioerr_string_from_status(*status);
+        goto _set_compression_bail;
+    }
+
+    if (tile_dims_obj != Py_None) {
+
+        tile_dims_py=get_int64_from_array(tile_dims_obj, &ndims);
+        if (tile_dims_py==NULL) {
+            *status=1;
+        } else {
+            tile_dims_fits = calloc(ndims,sizeof(long));
+            if (!tile_dims_fits) {
+                PyErr_Format(PyExc_MemoryError, "failed to allocate %ld longs",
+                             (long)ndims);
+                *status=1;
+                goto _set_compression_bail;
+            }
+
+            for (i=0; i<ndims; i++) {
+                tile_dims_fits[ndims-i-1] = tile_dims_py[i];
+            }
+
+            fits_set_tile_dim(fits, ndims, tile_dims_fits, status);
+
+            free(tile_dims_fits);tile_dims_fits=NULL;
+        }
+    }
+
+_set_compression_bail:
+    return *status;
+}
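+
+// for example (illustrative): python tile dims [100, 200] (C, row-major
+// order) are passed to cfitsio as [200, 100] (Fortran order)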
+
+static int pyarray_get_ndim(PyObject* obj) {
+    PyArrayObject* arr;
+    arr = (PyArrayObject*) obj;
+    return arr->nd;
+}
+
+/*
+   Create an image extension, possible writing data as well.
+
+   We allow creating from dimensions rather than from the input image shape,
+   writing into the HDU later
+
+   It is useful to create the extension first so we can write keywords into the
+   header before adding data.  This avoids moving the data if the header grows
+   too large.
+
+   However, on distributed file systems it can be more efficient to write
+   the data at this time due to slowness with updating the file in place.
+
+ */
+
+static PyObject *
+PyFITSObject_create_image_hdu(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+    int ndims=0;
+    long *dims=NULL;
+    int image_datatype=0; // fits type for image, AKA bitpix
+    int datatype=0; // type for the data we entered
+
+    int comptype=0; // same as NOCOMPRESS in newer cfitsio
+    PyObject* tile_dims_obj=NULL;
+
+    PyObject* array, *dims_obj;
+    int npy_dtype=0, nkeys=0, write_data=0;
+    int i=0;
+    int status=0;
+
+    char* extname=NULL;
+    int extver=0;
+    float qlevel=0;
+    int qmethod=0;
+    float hcomp_scale=0;
+    int hcomp_smooth=0;
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    static char *kwlist[] = {
+        "array","nkeys",
+         "dims",
+         "comptype",
+         "tile_dims",
+
+         "qlevel",
+         "qmethod",
+
+         "hcomp_scale",
+         "hcomp_smooth",
+
+         "extname",
+         "extver",
+         NULL,
+    };
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "Oi|OiOfifisi", kwlist,
+                          &array, &nkeys,
+                          &dims_obj,
+                          &comptype,
+                          &tile_dims_obj,
+
+                          &qlevel,
+                          &qmethod,
+
+                          &hcomp_scale,
+                          &hcomp_smooth,
+
+                          &extname,
+                          &extver)) {
+        goto create_image_hdu_cleanup;
+    }
+
+
+    if (array == Py_None) {
+        if (create_empty_hdu(self)) {
+            return NULL;
+        }
+    } else {
+        if (!PyArray_Check(array)) {
+            PyErr_SetString(PyExc_TypeError, "input must be an array.");
+            goto create_image_hdu_cleanup;
+        }
+
+        npy_dtype = PyArray_TYPE(array);
+        if (npy_to_fits_image_types(npy_dtype, &image_datatype, &datatype)) {
+            goto create_image_hdu_cleanup;
+        }
+
+        if (PyArray_Check(dims_obj)) {
+            // get dims from input, which must be of type 'i8'
+            // this means we are not writing the array that was input,
+            // it is only used to determine the data type
+            npy_int64 *tptr=NULL, tmp=0;
+            ndims = PyArray_SIZE(dims_obj);
+            dims = calloc(ndims,sizeof(long));
+            for (i=0; i<ndims; i++) {
+                tptr = (npy_int64 *) PyArray_GETPTR1(dims_obj, i);
+                tmp = *tptr;
+                dims[ndims-i-1] = (long) tmp;
+            }
+            write_data=0;
+        } else {
+            // we get the dimensions from the array, which means we are going
+            // to write it as well
+            ndims = pyarray_get_ndim(array);
+            dims = calloc(ndims,sizeof(long));
+            for (i=0; i<ndims; i++) {
+                dims[ndims-i-1] = PyArray_DIM(array, i);
+            }
+            write_data=1;
+        }
+
+        // 0 means NOCOMPRESS but that wasn't defined in the bundled version of cfitsio
+        // if (comptype >= 0) {
+        if (comptype > 0) {
+            // exception strings are set internally
+            if (set_compression(self->fits, comptype, tile_dims_obj, &status)) {
+                goto create_image_hdu_cleanup;
+            }
+
+            if (fits_set_quantize_level(self->fits, qlevel, &status)) {
+                goto create_image_hdu_cleanup;
+            }
+
+            if (fits_set_quantize_method(self->fits, qmethod, &status)) {
+                goto create_image_hdu_cleanup;
+            }
+
+            if (comptype == HCOMPRESS_1) {
+
+                if (fits_set_hcomp_scale(self->fits, hcomp_scale, &status)) {
+                    goto create_image_hdu_cleanup;
+                }
+                if (fits_set_hcomp_smooth(self->fits, hcomp_smooth, &status)) {
+                    goto create_image_hdu_cleanup;
+                }
+
+            }
+        }
+
+        if (fits_create_img(self->fits, image_datatype, ndims, dims, &status)) {
+            set_ioerr_string_from_status(status);
+            goto create_image_hdu_cleanup;
+        }
+
+
+    }
+    if (extname != NULL) {
+        if (strlen(extname) > 0) {
+
+            // comments are NULL
+            if (fits_update_key_str(self->fits, "EXTNAME", extname, NULL, &status)) {
+                set_ioerr_string_from_status(status);
+                goto create_image_hdu_cleanup;
+            }
+            if (extver > 0) {
+                if (fits_update_key_lng(self->fits, "EXTVER", (LONGLONG) extver, NULL, &status)) {
+                    set_ioerr_string_from_status(status);
+                    goto create_image_hdu_cleanup;
+                }
+            }
+        }
+    }
+
+    if (nkeys > 0) {
+        if (fits_set_hdrsize(self->fits, nkeys, &status) ) {
+            set_ioerr_string_from_status(status);
+            goto create_image_hdu_cleanup;
+        }
+    }
+
+    if (write_data) {
+        int firstpixel=1;
+        LONGLONG nelements = 0;
+        void* data=NULL;
+        nelements = PyArray_SIZE(array);
+        data = PyArray_DATA(array);
+        if (fits_write_img(self->fits, datatype, firstpixel, nelements, data, &status)) {
+            set_ioerr_string_from_status(status);
+            goto create_image_hdu_cleanup;
+        }
+    }
+
+    // this does a full close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        goto create_image_hdu_cleanup;
+    }
+
+
+create_image_hdu_cleanup:
+
+    // free before the error check so error paths do not leak dims
+    free(dims); dims=NULL;
+
+    if (status != 0 || PyErr_Occurred()) {
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+// reshape the image to specified dims
+// the input array must be of type int64
+static PyObject *
+PyFITSObject_reshape_image(struct PyFITSObject* self, PyObject* args) {
+
+    int status=0;
+    int hdunum=0, hdutype=0;
+    PyObject* dims_obj=NULL;
+    LONGLONG dims[CFITSIO_MAX_ARRAY_DIMS]={0};
+    LONGLONG dims_orig[CFITSIO_MAX_ARRAY_DIMS]={0};
+    int ndims=0, ndims_orig=0;
+    npy_int64 dim=0;
+    npy_intp i=0;
+    int bitpix=0, maxdim=CFITSIO_MAX_ARRAY_DIMS;
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTuple(args, (char*)"iO", &hdunum, &dims_obj)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    // existing image params, just to get bitpix
+    if (fits_get_img_paramll(self->fits, maxdim, &bitpix, &ndims_orig, dims_orig, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    ndims = PyArray_SIZE(dims_obj);
+    for (i=0; i<ndims; i++) {
+        dim= *(npy_int64 *) PyArray_GETPTR1(dims_obj, i);
+        dims[i] = (LONGLONG) dim;
+    }
+
+    if (fits_resize_imgll(self->fits, bitpix, ndims, dims, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+// write the image to an existing HDU created using create_image_hdu
+// dims are not checked
+static PyObject *
+PyFITSObject_write_image(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+    LONGLONG nelements=1;
+    PY_LONG_LONG firstpixel_py=0;
+    LONGLONG firstpixel=0;
+    int image_datatype=0; // fits type for image, AKA bitpix
+    int datatype=0; // type for the data we entered
+
+    PyObject* array;
+    void* data=NULL;
+    int npy_dtype=0;
+    int status=0;
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTuple(args, (char*)"iOL", &hdunum, &array, &firstpixel_py)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    if (!PyArray_Check(array)) {
+        PyErr_SetString(PyExc_TypeError, "input must be an array.");
+        return NULL;
+    }
+
+    npy_dtype = PyArray_TYPE(array);
+    if (npy_to_fits_image_types(npy_dtype, &image_datatype, &datatype)) {
+        return NULL;
+    }
+
+
+    data = PyArray_DATA(array);
+    nelements = PyArray_SIZE(array);
+    firstpixel = (LONGLONG) firstpixel_py;
+    if (fits_write_img(self->fits, datatype, firstpixel, nelements, data, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this is a full file close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+/*
+ * Write tdims from the list.  The list must be the expected length.
+ * Entries must be strings or None; if None the tdim is not written.
+ *
+ * The keys are written as TDIM{colnum}
+ */
+static int 
+add_tdims_from_listobj(fitsfile* fits, PyObject* tdimObj, int ncols) {
+    int status=0, i=0;
+    size_t size=0;
+    char keyname[20];
+    int colnum=0;
+    PyObject* tmp=NULL;
+    char* tdim=NULL;
+
+    if (tdimObj == NULL || tdimObj == Py_None) {
+        // it is ok for it to be empty
+        return 0;
+    }
+
+    if (!PyList_Check(tdimObj)) {
+        PyErr_SetString(PyExc_ValueError, "Expected a list for tdims");
+        return 1;
+    }
+
+    size = PyList_Size(tdimObj);
+    if (size != (size_t)ncols) {
+        PyErr_Format(PyExc_ValueError, "Expected %d elements in tdims list, got %ld", ncols, size);
+        return 1;
+    }
+
+    for (i=0; i<ncols; i++) {
+        colnum=i+1;
+        tmp = PyList_GetItem(tdimObj, i);
+        if (tmp != Py_None) {
+            if (!is_python_string(tmp)) {
+                PyErr_SetString(PyExc_ValueError, "Expected only strings or None for tdim");
+                return 1;
+            }
+
+            sprintf(keyname, "TDIM%d", colnum);
+
+            tdim = get_object_as_string(tmp);
+            fits_write_key(fits, TSTRING, keyname, tdim, NULL, &status);
+            free(tdim);
+
+            if (status) {
+                set_ioerr_string_from_status(status);
+                return 1;
+            }
+        }
+    }
+
+
+    return 0;
+}
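+
+// for example (illustrative): tdims=[None, '(3,4)'] writes TDIM2 = '(3,4)'
+// and leaves TDIM1 unset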
+
+
+// create a new table structure.  No physical rows are added yet.
+static PyObject *
+PyFITSObject_create_table_hdu(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+    int status=0;
+    int table_type=0, nkeys=0;
+    int nfields=0;
+    LONGLONG nrows=0; // start empty
+
+    static char *kwlist[] = {
+        "table_type","nkeys", "ttyp","tform",
+        "tunit", "tdim", "extname", "extver", NULL};
+    // these are all strings
+    PyObject* ttypObj=NULL;
+    PyObject* tformObj=NULL;
+    PyObject* tunitObj=NULL;    // optional
+    PyObject* tdimObj=NULL;     // optional
+
+    // these must be freed
+    struct stringlist* ttyp=NULL;
+    struct stringlist* tform=NULL;
+    struct stringlist* tunit=NULL;
+    //struct stringlist* tdim=stringlist_new();
+    char* extname=NULL;
+    char* extname_use=NULL;
+    int extver=0;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiOO|OOsi", kwlist,
+                          &table_type, &nkeys, &ttypObj, &tformObj, &tunitObj, &tdimObj, &extname, &extver)) {
+        return NULL;
+    }
+
+    ttyp=stringlist_new();
+    tform=stringlist_new();
+    tunit=stringlist_new();
+    if (stringlist_addfrom_listobj(ttyp, ttypObj, "names")) {
+        status=99;
+        goto create_table_cleanup;
+    }
+
+    if (stringlist_addfrom_listobj(tform, tformObj, "formats")) {
+        status=99;
+        goto create_table_cleanup;
+    }
+
+    if (tunitObj != NULL && tunitObj != Py_None) {
+        if (stringlist_addfrom_listobj(tunit, tunitObj,"units")) {
+            status=99;
+            goto create_table_cleanup;
+        }
+    }
+
+    if (extname != NULL) {
+        if (strlen(extname) > 0) {
+            extname_use = extname;
+        }
+    }
+    nfields = ttyp->size;
+    if ( fits_create_tbl(self->fits, table_type, nrows, nfields, 
+                         ttyp->data, tform->data, tunit->data, extname_use, &status) ) {
+        set_ioerr_string_from_status(status);
+        goto create_table_cleanup;
+    }
+
+    if (add_tdims_from_listobj(self->fits, tdimObj, nfields)) {
+        status=99;
+        goto create_table_cleanup;
+    }
+
+    if (extname_use != NULL) {
+        if (extver > 0) {
+
+            if (fits_update_key_lng(self->fits, "EXTVER", (LONGLONG) extver, NULL, &status)) {
+                set_ioerr_string_from_status(status);
+                goto create_table_cleanup;
+            }
+        }
+    }
+
+    if (nkeys > 0) {
+        if (fits_set_hdrsize(self->fits, nkeys, &status) ) {
+            set_ioerr_string_from_status(status);
+            goto create_table_cleanup;
+        }
+    }
+
+    // this does a full close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        goto create_table_cleanup;
+    }
+
+create_table_cleanup:
+    ttyp = stringlist_delete(ttyp);
+    tform = stringlist_delete(tform);
+    tunit = stringlist_delete(tunit);
+    //tdim = stringlist_delete(tdim);
+
+
+    if (status != 0) {
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+
+
+
+// insert a new column into an existing table
+static PyObject *
+PyFITSObject_insert_col(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+    int status=0;
+    int hdunum=0;
+    int colnum=0;
+
+    int hdutype=0;
+
+    static char *kwlist[] = {"hdunum","colnum","ttyp","tform","tdim", NULL};
+    // these are all strings
+    char* ttype=NULL; // field name
+    char* tform=NULL; // format
+    PyObject* tdimObj=NULL;     // optional, a list of len 1
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiss|O", kwlist,
+                          &hdunum, &colnum, &ttype, &tform, &tdimObj)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_insert_col(self->fits, colnum, ttype, tform, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // OK if dims are not sent
+    if (tdimObj != NULL && tdimObj != Py_None) {
+        PyObject* tmp=NULL;
+        char* tdim=NULL;
+        char keyname[20];
+
+        sprintf(keyname, "TDIM%d", colnum);
+        tmp = PyList_GetItem(tdimObj, 0);
+
+        tdim = get_object_as_string(tmp);
+        fits_write_key(self->fits, TSTRING, keyname, tdim, NULL, &status);
+        free(tdim);
+
+        if (status) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    }
+
+    // this does a full close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+
+
+// note: colnum is not validated here; the caller must ensure it refers to a
+// string column
+static
+int write_string_column( 
+        fitsfile *fits,  /* I - FITS file pointer                       */
+        int  colnum,     /* I - number of column to write (1 = 1st col) */
+        LONGLONG  firstrow,  /* I - first row to write (1 = 1st row)        */
+        LONGLONG  firstelem, /* I - first vector element to write (1 = 1st) */
+        LONGLONG  nelem,     /* I - number of strings to write              */
+        char  *data,
+        int  *status) {   /* IO - error status                           */
+
+    LONGLONG i=0;
+    LONGLONG twidth=0;
+    // need to create a char** representation of the data, just point back
+    // into the data array at string width offsets.  the fits_write_col_str
+    // takes care of skipping between fields.
+    char* cdata=NULL;
+    char** strdata=NULL;
+
+    // using struct def here, could cause problems
+    twidth = fits->Fptr->tableptr[colnum-1].twidth;
+
+    strdata = malloc(nelem*sizeof(char*));
+    if (strdata == NULL) {
+        PyErr_SetString(PyExc_MemoryError, "could not allocate temporary string pointers");
+        *status = 99;
+        return 1;
+    }
+    cdata = (char* ) data;
+    for (i=0; i<nelem; i++) {
+        strdata[i] = &cdata[twidth*i];
+    }
+
+    if( fits_write_col_str(fits, colnum, firstrow, firstelem, nelem, strdata, status)) {
+        set_ioerr_string_from_status(*status);
+        free(strdata);
+        return 1;
+    }
+
+
+    free(strdata);
+
+    return 0;
+}
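+
+// for example (illustrative): with twidth=5 and nelem=3, strdata holds
+// pointers to offsets 0, 5 and 10 within the contiguous data buffer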
+
+
+// write a column, starting at firstrow.  On the python side, the firstrow kwd
+// should default to 1.
+// You can append rows using firstrow = nrows+1
+/*
+static PyObject *
+PyFITSObject_write_column(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+    int colnum=0;
+    int write_bitcols=0;
+    PyObject* array=NULL;
+
+    void* data=NULL;
+    PY_LONG_LONG firstrow_py=0;
+    LONGLONG firstrow=1;
+    LONGLONG firstelem=1;
+    LONGLONG nelem=0;
+    int npy_dtype=0;
+    int fits_dtype=0;
+
+    static char *kwlist[] = {"hdunum","colnum","array","firstrow","write_bitcols", NULL};
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiOLi", 
+                                     kwlist, &hdunum, &colnum, &array, &firstrow_py, &write_bitcols)) {
+        return NULL;
+    }
+    firstrow = (LONGLONG) firstrow_py;
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+
+    if (!PyArray_Check(array)) {
+        PyErr_SetString(PyExc_ValueError,"only arrays can be written to columns");
+        return NULL;
+    }
+
+    npy_dtype = PyArray_TYPE(array);
+    fits_dtype = npy_to_fits_table_type(npy_dtype, write_bitcols);
+    if (fits_dtype == -9999) {
+        return NULL;
+    }
+    if (fits_dtype == TLOGICAL) {
+        int tstatus=0, ttype=0;
+        LONGLONG trepeat=0, twidth=0;
+        // if the column exists and is declared TBIT we will write
+        // that way instead
+        if (fits_get_coltypell(self->fits, colnum,
+                               &ttype, &trepeat, &twidth, &tstatus)==0) {
+            // if we don't get here its because the column doesn't exist
+            // yet and that's ok
+            if (ttype==TBIT) {
+                fits_dtype=TBIT;
+            }
+        }
+    }
+
+
+
+    data = PyArray_DATA(array);
+    nelem = PyArray_SIZE(array);
+
+    if (fits_dtype == TSTRING) {
+
+        // this is my wrapper for strings
+        if (write_string_column(self->fits, colnum, firstrow, firstelem, nelem, data, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    } else if (fits_dtype == TBIT) {
+        if (fits_write_col_bit(self->fits, colnum, firstrow, firstelem, nelem, data, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    } else {
+        if( fits_write_col(self->fits, fits_dtype, colnum, firstrow, firstelem, nelem, data, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    }
+
+    // this is a full file close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+
+    Py_RETURN_NONE;
+}
+*/
+
+static PyObject *
+PyFITSObject_write_columns(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+    int write_bitcols=0;
+    //void **data_ptrs=NULL;
+    PyObject* colnum_list=NULL;
+    PyObject* array_list=NULL;
+    PyObject *tmp_array=NULL, *tmp_obj=NULL;
+
+    Py_ssize_t ncols=0;
+
+    void* data=NULL;
+    PY_LONG_LONG firstrow_py=0;
+    LONGLONG firstrow=1, thisrow=0;
+    LONGLONG firstelem=1;
+    LONGLONG nelem=0;
+    LONGLONG *nperrow=NULL;
+    int npy_dtype=0;
+    int *fits_dtypes=NULL;
+    int *is_string=NULL, *colnums=NULL;
+    void **array_ptrs=NULL;
+
+    npy_intp ndim=0, *dims=NULL;
+    Py_ssize_t irow=0, icol=0, j=0;
+
+    static char *kwlist[] = {"hdunum","colnums","arraylist","firstrow","write_bitcols", NULL};
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "iOOLi", 
+                                     kwlist, &hdunum, &colnum_list, &array_list, &firstrow_py, &write_bitcols)) {
+        return NULL;
+    }
+    firstrow = (LONGLONG) firstrow_py;
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+
+    if (!PyList_Check(colnum_list)) {
+        PyErr_SetString(PyExc_ValueError,"colnums must be a list");
+        return NULL;
+    }
+    if (!PyList_Check(array_list)) {
+        PyErr_SetString(PyExc_ValueError,"arraylist must be a list");
+        return NULL;
+    }
+    ncols = PyList_Size(colnum_list);
+    if (ncols == 0) {
+        goto _fitsio_pywrap_write_columns_bail;
+    }
+    if (ncols != PyList_Size(array_list)) {
+        PyErr_Format(PyExc_ValueError,"colnum and array lists not same size: %ld/%ld",
+                     ncols, PyList_Size(array_list));
+        return NULL;
+    }
+
+    // from here on we'll have some temporary arrays we have to free
+    is_string = calloc(ncols, sizeof(int));
+    colnums = calloc(ncols, sizeof(int));
+    array_ptrs = calloc(ncols, sizeof(void*));
+    nperrow = calloc(ncols, sizeof(LONGLONG));
+    fits_dtypes = calloc(ncols, sizeof(int));
+    if (is_string==NULL || colnums==NULL || array_ptrs==NULL
+            || nperrow==NULL || fits_dtypes==NULL) {
+        PyErr_SetString(PyExc_MemoryError,
+                        "could not allocate temporary column arrays");
+        status=1;
+        goto _fitsio_pywrap_write_columns_bail;
+    }
+
+    for (icol=0; icol<ncols; icol++) {
+
+        tmp_obj = PyList_GetItem(colnum_list,icol);
+#if PY_MAJOR_VERSION >= 3
+        colnums[icol] = 1+(int) PyLong_AsLong(tmp_obj);
+#else
+        colnums[icol] = 1+(int) PyInt_AsLong(tmp_obj);
+#endif
+
+        tmp_array = PyList_GetItem(array_list, icol);
+        npy_dtype = PyArray_TYPE(tmp_array);
+
+        fits_dtypes[icol] = npy_to_fits_table_type(npy_dtype, write_bitcols);
+        if (fits_dtypes[icol] == -9999) {
+            status=1;
+            goto _fitsio_pywrap_write_columns_bail;
+        }
+        if (fits_dtypes[icol] == TLOGICAL) {
+            int tstatus=0, ttype=0;
+            LONGLONG trepeat=0, twidth=0;
+            // if the column exists and is declared TBIT we will write
+            // that way instead
+            if (fits_get_coltypell(self->fits, colnums[icol],
+                                   &ttype, &trepeat, &twidth, &tstatus)==0) {
+                // if we don't get here its because the column doesn't exist
+                // yet and that's ok
+                if (ttype==TBIT) {
+                    fits_dtypes[icol]=TBIT;
+                }
+            }
+        }
+
+        if (fits_dtypes[icol]==TSTRING) {
+            is_string[icol] = 1;
+        }
+        ndim = PyArray_NDIM(tmp_array);
+        dims = PyArray_DIMS(tmp_array);
+        if (icol==0) {
+            nelem = dims[0];
+        } else {
+            if (dims[0] != nelem) {
+                PyErr_Format(PyExc_ValueError,
+                        "not all entries have same row count, "
+                        "%lld/%ld", nelem,dims[0]);
+                status=1;
+                goto _fitsio_pywrap_write_columns_bail;
+            }
+        }
+
+        array_ptrs[icol] = tmp_array;
+
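+        // number of elements per row for this column, e.g. a field with
+        // array shape (nrows, 3, 4) has 12 elements in each row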
+        nperrow[icol] = 1;
+        for (j=1; j<ndim; j++) {
+            nperrow[icol] *= dims[j];
+        }
+    }
+
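+    // write the data row by row, cycling through the columns within
+    // each row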
+    for (irow=0; irow<nelem; irow++) {
+        thisrow = firstrow + irow;
+        for (icol=0; icol<ncols; icol++) {
+            data=PyArray_GETPTR1(array_ptrs[icol], irow);
+            if (is_string[icol]) {
+                if (write_string_column(self->fits, 
+                                        colnums[icol], 
+                                        thisrow, 
+                                        firstelem, 
+                                        nperrow[icol], 
+                                        (char*)data, 
+                                        &status)) {
+                    set_ioerr_string_from_status(status);
+                    goto _fitsio_pywrap_write_columns_bail;
+                }
+
+            } else if (fits_dtypes[icol] == TBIT) {
+                if (fits_write_col_bit(self->fits,
+                                       colnums[icol],
+                                       thisrow,
+                                       firstelem,
+                                       nperrow[icol],
+                                       data,
+                                       &status)) {
+                    set_ioerr_string_from_status(status);
+                    goto _fitsio_pywrap_write_columns_bail;
+                }
+            } else {
+                if( fits_write_col(self->fits, 
+                                   fits_dtypes[icol], 
+                                   colnums[icol], 
+                                   thisrow, 
+                                   firstelem, 
+                                   nperrow[icol], 
+                                   data, 
+                                   &status)) {
+                    set_ioerr_string_from_status(status);
+                    goto _fitsio_pywrap_write_columns_bail;
+                }
+            }
+        }
+    }
+    /*
+    nelem = PyArray_SIZE(array);
+
+    if (fits_dtype == TSTRING) {
+
+        // this is my wrapper for strings
+        if (write_string_column(self->fits, colnum, firstrow, firstelem, nelem, data, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+        
+    } else {
+        if( fits_write_col(self->fits, fits_dtype, colnum, firstrow, firstelem, nelem, data, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    }
+
+    // this is a full file close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    */
+
+_fitsio_pywrap_write_columns_bail:
+    free(is_string); is_string=NULL;
+    free(colnums); colnums=NULL;
+    free(array_ptrs); array_ptrs=NULL;
+    free(nperrow); nperrow=NULL;
+    free(fits_dtypes); fits_dtypes=NULL;
+    if (status != 0) {
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+
+
+
+
+
+
+
+// the input data are not validated here; the caller must send data of
+// the correct type
+static
+int write_var_string_column( 
+        fitsfile *fits,  /* I - FITS file pointer                       */
+        int  colnum,     /* I - number of column to write (1 = 1st col) */
+        LONGLONG  firstrow,  /* I - first row to write (1 = 1st row)        */
+        PyObject* array,
+        int  *status) {   /* IO - error status                           */
+
+    LONGLONG firstelem=1; // always 1: each row is written from its first element
+    LONGLONG nelem=1;     // one string per row
+    npy_intp nrows=0;
+    npy_intp i=0;
+    char* ptr=NULL;
+    int res=0;
+
+    PyObject* el=NULL;
+    char* strdata=NULL;
+    char* strarr[1];
+
+
+    nrows = PyArray_SIZE(array);
+    for (i=0; i<nrows; i++) {
+        ptr = PyArray_GetPtr((PyArrayObject*) array, &i);
+        el = PyArray_GETITEM(array, ptr);
+
+        strdata=get_object_as_string(el);
+
+        // PyArray_GETITEM returned a new reference; release it now that
+        // the string data have been copied
+        Py_XDECREF(el);
+
+        // just a container
+        strarr[0] = strdata;
+        res=fits_write_col_str(fits, colnum, 
+                               firstrow+i, firstelem, nelem, 
+                               strarr, status);
+
+        free(strdata);
+        if(res > 0) {
+            goto write_var_string_column_cleanup;
+        }
+    }
+
+write_var_string_column_cleanup:
+
+    if (*status > 0) {
+        return 1;
+    }
+
+    return 0;
+}
+
+/*
+ * the input data are not validated here; the caller must send data of
+ * the correct type
+ */
+static
+int write_var_num_column( 
+        fitsfile *fits,  /* I - FITS file pointer                       */
+        int  colnum,     /* I - number of column to write (1 = 1st col) */
+        LONGLONG  firstrow,  /* I - first row to write (1 = 1st row)        */
+        int fits_dtype, 
+        PyObject* array,
+        int  *status) {   /* IO - error status                           */
+
+    LONGLONG firstelem=1;
+    npy_intp nelem=0;
+    npy_intp nrows=0;
+    npy_intp i=0;
+    PyObject* el=NULL;
+    PyObject* el_array=NULL;
+    void* data=NULL;
+    void* ptr=NULL;
+
+    int npy_dtype=0, isvariable=0;
+
+    int mindepth=1, maxdepth=0;
+    PyObject* context=NULL;
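+    // require a contiguous, aligned, native-byte-order array so the
+    // buffer can be handed directly to fits_write_col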
+    int requirements = 
+        NPY_C_CONTIGUOUS 
+        | NPY_ALIGNED 
+        | NPY_NOTSWAPPED 
+        | NPY_ELEMENTSTRIDES;
+
+    int res=0;
+
+    npy_dtype = fits_to_npy_table_type(fits_dtype, &isvariable);
+
+    nrows = PyArray_SIZE(array);
+    for (i=0; i<nrows; i++) {
+        ptr = PyArray_GetPtr((PyArrayObject*) array, &i);
+        el = PyArray_GETITEM(array, ptr);
+
+        // a copy is only made if needed
+        el_array = PyArray_CheckFromAny(el, PyArray_DescrFromType(npy_dtype),
+                                        mindepth, maxdepth,
+                                        requirements, context);
+
+        // PyArray_GETITEM returned a new reference; release it now that
+        // el_array holds the data
+        Py_XDECREF(el);
+
+        if (el_array == NULL) {
+            // error message will already be set
+            return 1;
+        }
+
+        // take the size from the converted array; el itself need not be
+        // an ndarray (it could be a python list)
+        nelem=PyArray_SIZE(el_array);
+        data=PyArray_DATA(el_array);
+        res=fits_write_col(fits, abs(fits_dtype), colnum, 
+                           firstrow+i, firstelem, (LONGLONG) nelem, data, status);
+        Py_XDECREF(el_array);
+
+        if(res > 0) {
+            set_ioerr_string_from_status(*status);
+            return 1;
+        }
+    }
+
+    return 0;
+}
+
+
+
+
+/* 
+ * write a variable length column, starting at firstrow.  On the python side,
+ * the firstrow kwd should default to 1.  You can append rows using firstrow =
+ * nrows+1
+ *
+ * The input array should be of type NPY_OBJECT, and the elements
+ * should be either all strings or numpy arrays of the same type
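+ *
+ * for example (a sketch with hypothetical names), built on the python side:
+ *
+ *     arr = numpy.empty(2, dtype=object)
+ *     arr[0] = numpy.arange(3, dtype='i4')
+ *     arr[1] = numpy.arange(5, dtype='i4')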
+ */
+
+static PyObject *
+PyFITSObject_write_var_column(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+    int colnum=0;
+    PyObject* array=NULL;
+
+    PY_LONG_LONG firstrow_py=0;
+    LONGLONG firstrow=1;
+    int npy_dtype=0;
+    int fits_dtype=0;
+
+    static char *kwlist[] = {"hdunum","colnum","array","firstrow", NULL};
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiOL", 
+                  kwlist, &hdunum, &colnum, &array, &firstrow_py)) {
+        return NULL;
+    }
+    firstrow = (LONGLONG) firstrow_py;
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+
+    if (!PyArray_Check(array)) {
+        PyErr_SetString(PyExc_ValueError,"only arrays can be written to columns");
+        return NULL;
+    }
+
+    npy_dtype = PyArray_TYPE(array);
+    if (npy_dtype != NPY_OBJECT) {
+        PyErr_SetString(PyExc_TypeError,"only object arrays can be written to variable length columns");
+        return NULL;
+    }
+
+    // determine the fits dtype for this column.  We will use this to get data
+    // from the array for writing
+    if (fits_get_eqcoltypell(self->fits, colnum, &fits_dtype, NULL, NULL, &status) > 0) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_dtype == -TSTRING) {
+        if (write_var_string_column(self->fits, colnum, firstrow, array, &status)) {
+            if (status != 0) {
+                set_ioerr_string_from_status(status);
+            }
+            return NULL;
+        }
+    } else {
+        if (write_var_num_column(self->fits, colnum, firstrow, fits_dtype, array, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    }
+
+    // this is a full file close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+
+    Py_RETURN_NONE;
+}
+
+
+/*
+    case for writing an entire record
+*/
+static PyObject *
+PyFITSObject_write_record(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    char* cardin=NULL;
+    char card[FLEN_CARD];
+    if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &cardin)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    // copy at most FLEN_CARD-1 characters and guarantee nul termination
+    strncpy(card, cardin, FLEN_CARD-1);
+    card[FLEN_CARD-1] = '\0';
+
+    if (fits_write_record(self->fits, card, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does not close and reopen
+    if (fits_flush_buffer(self->fits, 0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+// let python do the conversions
+static PyObject *
+PyFITSObject_write_string_key(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    char* keyname=NULL;
+    char* value=NULL;
+    char* comment=NULL;
+    char* comment_in=NULL;
+    if (!PyArg_ParseTuple(args, (char*)"isss", &hdunum, &keyname, &value, &comment_in)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (strlen(comment_in) > 0) {
+        comment=comment_in;
+    }
+
+    if (fits_write_key_longstr(self->fits, keyname, value, comment, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does not close and reopen
+    if (fits_flush_buffer(self->fits, 0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+static PyObject *
+PyFITSObject_write_double_key(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    int decimals=-15;
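+    // a negative value here appears to request general (%G style)
+    // formatting with 15 significant digits from cfitsio, rather than
+    // a fixed number of decimal places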
+
+    char* keyname=NULL;
+    double value=0;
+    char* comment=NULL;
+    char* comment_in=NULL;
+    if (!PyArg_ParseTuple(args, (char*)"isds", &hdunum, &keyname, &value, &comment_in)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (strlen(comment_in) > 0) {
+        comment=comment_in;
+    }
+
+    if (fits_update_key_dbl(self->fits, keyname, value, decimals, comment, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does not close and reopen
+    if (fits_flush_buffer(self->fits, 0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+
+    Py_RETURN_NONE;
+}
+static PyObject *
+PyFITSObject_write_long_long_key(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    char* keyname=NULL;
+    long long value=0;
+    char* comment=NULL;
+    char* comment_in=NULL;
+    if (!PyArg_ParseTuple(args, (char*)"isLs", &hdunum, &keyname, &value, &comment_in)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (strlen(comment_in) > 0) {
+        comment=comment_in;
+    }
+
+    if (fits_update_key_lng(self->fits, keyname, (LONGLONG) value, comment, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does not close and reopen
+    if (fits_flush_buffer(self->fits, 0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+static PyObject *
+PyFITSObject_write_logical_key(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    char* keyname=NULL;
+    int value=0;
+    char* comment=NULL;
+    char* comment_in=NULL;
+    if (!PyArg_ParseTuple(args, (char*)"isis", &hdunum, &keyname, &value, &comment_in)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (strlen(comment_in) > 0) {
+        comment=comment_in;
+    }
+
+    if (fits_update_key_log(self->fits, keyname, value, comment, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does not close and reopen
+    if (fits_flush_buffer(self->fits, 0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+// let python do the conversions
+static PyObject *
+PyFITSObject_write_comment(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    char* comment=NULL;
+    if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &comment)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_write_comment(self->fits, comment, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does not close and reopen
+    if (fits_flush_buffer(self->fits, 0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+// let python do the conversions
+static PyObject *
+PyFITSObject_write_history(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    char* history=NULL;
+    if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &history)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_write_history(self->fits, history, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does not close and reopen
+    if (fits_flush_buffer(self->fits, 0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+//   ADW: Adapted from ffpcom and ffphis in putkey.c
+int fits_write_continue( fitsfile *fptr,      /* I - FITS file pointer  */
+                         const char *cont,    /* I - continue string    */
+                         int   *status)       /* IO - error status      */
+/*
+  Write 1 or more CONTINUE keywords.  If the continue string is too
+  long to fit on a single keyword (72 chars) then it will automatically
+  be continued on multiple CONTINUE keywords.
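+
+  For example, a 150 character string is split into three CONTINUE
+  cards holding 72, 72 and 6 characters.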
+*/
+{
+    char card[FLEN_CARD];
+    int len, ii;
+
+    if (*status > 0)           /* inherit input status value if > 0 */
+        return(*status);
+
+    len = strlen(cont);
+    ii = 0;
+
+    for (; len > 0; len -= 72)
+    {
+        strcpy(card, "CONTINUE");
+        strncat(card, &cont[ii], 72);
+        ffprec(fptr, card, status);
+        ii += 72;
+    }
+
+    return(*status);
+}
+
+// let python do the conversions
+static PyObject *
+PyFITSObject_write_continue(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    char* value=NULL;
+    if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &value)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_write_continue(self->fits, value, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does not close and reopen
+    if (fits_flush_buffer(self->fits, 0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+static PyObject *
+PyFITSObject_write_undefined_key(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    char* keyname=NULL;
+    char* comment=NULL;
+    char* comment_in=NULL;
+    if (!PyArg_ParseTuple(args, (char*)"iss", &hdunum, &keyname, &comment_in)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (strlen(comment_in) > 0) {
+        comment=comment_in;
+    }
+
+    if (fits_update_key_null(self->fits, keyname, comment, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does not close and reopen
+    if (fits_flush_buffer(self->fits, 0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+/*
+   insert a set of rows
+*/
+
+static PyObject *
+PyFITSObject_insert_rows(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+    int status=0;
+    int hdunum=0;
+
+    int hdutype=0;
+    PY_LONG_LONG firstrow_py=0, nrows_py=0;
+    LONGLONG firstrow=0, nrows=0;
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTuple(args, (char*)"iLL",
+                          &hdunum, &firstrow_py, &nrows_py)) {
+        return NULL;
+    }
+
+    firstrow = (LONGLONG) firstrow_py;
+    nrows = (LONGLONG) nrows_py;
+
+    if (nrows <= 0) {
+        // nothing to do, just return
+        Py_RETURN_NONE;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_insert_rows(self->fits, firstrow, nrows, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does a full close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+/*
+
+   delete a range of rows
+
+   input stop is like a python slice, so exclusive, but 1-offset
+   rather than 0-offset
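+
+   for example, start=1 and stop=4 delete rows 1, 2 and 3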
+*/
+
+static PyObject *
+PyFITSObject_delete_row_range(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+    int status=0;
+    int hdunum=0;
+
+    int hdutype=0;
+    PY_LONG_LONG slice_start_py=0, slice_stop_py=0;
+    LONGLONG slice_start=0, slice_stop=0, nrows=0;
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTuple(args, (char*)"iLL",
+                          &hdunum, &slice_start_py, &slice_stop_py)) {
+        return NULL;
+    }
+
+    slice_start = (LONGLONG) slice_start_py;
+    slice_stop = (LONGLONG) slice_stop_py;
+    nrows = slice_stop - slice_start;
+
+    if (nrows <= 0) {
+        // nothing to do, just return
+        Py_RETURN_NONE;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_delete_rows(self->fits, slice_start, nrows, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does a full close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+/*
+
+   delete a specific set of rows, 1-offset
+
+   no type checking is applied to the rows
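+
+   the rows must be 64-bit integers, since the data pointer is cast
+   directly to LONGLONG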
+*/
+
+static PyObject *
+PyFITSObject_delete_rows(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+    int status=0;
+    int hdunum=0;
+
+    int hdutype=0;
+    PyObject *rows_array=NULL;
+    LONGLONG *rows=NULL, nrows=0;
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+        return NULL;
+    }
+
+    if (!PyArg_ParseTuple(args, (char*)"iO",
+                          &hdunum, &rows_array)) {
+        return NULL;
+    }
+
+    rows = (LONGLONG *) PyArray_DATA(rows_array);
+    nrows = PyArray_SIZE(rows_array);
+    if (nrows <= 0) {
+        Py_RETURN_NONE;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_delete_rowlistll(self->fits, rows, nrows, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // this does a full close and reopen
+    if (fits_flush_file(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+
+
+/*
+ * read a single, entire column from an ascii table into the input array.  This
+ * version uses the standard read column instead of our by-bytes version.
+ *
+ * A number of assumptions are made, such as that columns are scalar, which
+ * is true for ascii.
+ */
+
+static int read_ascii_column_all(fitsfile* fits, int colnum, PyObject* array, int* status) {
+
+    int npy_dtype=0;
+    int fits_dtype=0;
+
+    npy_intp nelem=0;
+    LONGLONG firstelem=1;
+    LONGLONG firstrow=1;
+    int* anynul=NULL;
+    void* nulval=0;
+    char* nulstr=" ";
+    void* data=NULL;
+    char* cdata=NULL;
+
+    npy_dtype = PyArray_TYPE(array);
+    fits_dtype = npy_to_fits_table_type(npy_dtype,0);
+
+    nelem = PyArray_SIZE(array);
+
+    if (fits_dtype == TSTRING) {
+        npy_intp i=0;
+        LONGLONG rownum=0;
+
+        for (i=0; i<nelem; i++) {
+            cdata = PyArray_GETPTR1(array, i);
+            rownum = (LONGLONG) (1+i);
+            if (fits_read_col_str(fits,colnum,rownum,firstelem,1,nulstr,&cdata,anynul,status) > 0) {
+                return 1;
+            }
+        }
+
+        /*
+
+        LONGLONG twidth=0;
+        char** strdata=NULL;
+
+        cdata = (char*) PyArray_DATA(array);
+
+        strdata=malloc(nelem*sizeof(char*));
+        if (NULL==strdata) {
+            PyErr_SetString(PyExc_MemoryError, "could not allocate temporary string pointers");
+            *status = 99;
+            return 1;
+
+        }
+
+
+        twidth=fits->Fptr->tableptr[colnum-1].twidth;
+        for (i=0; i<nelem; i++) {
+            //strdata[i] = &cdata[twidth*i];
+            // this 1-d assumption works because array fields are not allowedin ascii
+            strdata[i] = (char*) PyArray_GETPTR1(array, i);
+        }
+
+        if (fits_read_col_str(fits,colnum,firstrow,firstelem,nelem,nulstr,strdata,anynul,status) > 0) {
+            free(strdata);
+            return 1;
+        }
+
+        free(strdata);
+        */
+
+    } else {
+        data=PyArray_DATA(array);
+        if (fits_read_col(fits,fits_dtype,colnum,firstrow,firstelem,nelem,nulval,data,anynul,status) > 0) {
+            return 1;
+        }
+    }
+
+    return 0;
+
+}
+static int read_ascii_column_byrow(
+        fitsfile* fits, int colnum, PyObject* array, PyObject* rowsObj, int* status) {
+
+    int npy_dtype=0;
+    int fits_dtype=0;
+
+    npy_intp nelem=0;
+    LONGLONG firstelem=1;
+    LONGLONG rownum=0;
+    npy_intp nrows=-1;
+
+    int* anynul=NULL;
+    void* nulval=0;
+    char* nulstr=" ";
+    void* data=NULL;
+    char* cdata=NULL;
+
+    int dorows=0;
+
+    npy_intp i=0;
+
+    npy_dtype = PyArray_TYPE(array);
+    fits_dtype = npy_to_fits_table_type(npy_dtype,0);
+
+    nelem = PyArray_SIZE(array);
+
+
+    if (rowsObj != Py_None) {
+        dorows=1;
+        nrows = PyArray_SIZE(rowsObj);
+        if (nrows != nelem) {
+            PyErr_Format(PyExc_ValueError,
+                    "input array[%ld] and rows[%ld] have different size", nelem,nrows);
+            return 1;
+        }
+    } else {
+        // no row subset was sent; read an entry for every array element
+        nrows = nelem;
+    }
+
+    for (i=0; i<nrows; i++) {
+        if (dorows) {
+            rownum = (LONGLONG) (1 + *(npy_int64*) PyArray_GETPTR1(rowsObj, i));
+        } else {
+            rownum = (LONGLONG) (1+i);
+        }
+        // assuming 1-D
+        data = PyArray_GETPTR1(array, i);
+        if (fits_dtype==TSTRING) {
+            cdata = (char* ) data;
+            if (fits_read_col_str(fits,colnum,rownum,firstelem,1,nulstr,&cdata,anynul,status) > 0) {
+                return 1;
+            }
+        } else {
+            if (fits_read_col(fits,fits_dtype,colnum,rownum,firstelem,1,nulval,data,anynul,status) > 0) {
+                return 1;
+            }
+        }
+    }
+
+    return 0;
+}
+
+
+static int read_ascii_column(fitsfile* fits, int colnum, PyObject* array, PyObject* rowsObj, int* status) {
+
+    int ret=0;
+    if (rowsObj != Py_None || !PyArray_ISCONTIGUOUS(array)) {
+        ret = read_ascii_column_byrow(fits, colnum, array, rowsObj, status);
+    } else {
+        ret = read_ascii_column_all(fits, colnum, array, status);
+    }
+
+    return ret;
+}
+
+
+
+
+
+// read a subset of rows for the input column
+// the row array is assumed to be unique and sorted.
+static int read_binary_column(
+        fitsfile* fits, 
+        int colnum, 
+        npy_intp nrows, 
+        npy_int64* rows, 
+        void* data, 
+        npy_intp stride, 
+        int* status) {
+
+    FITSfile* hdu=NULL;
+    tcolumn* colptr=NULL;
+    LONGLONG file_pos=0, irow=0;
+    npy_int64 row=0;
+
+    LONGLONG repeat=0;
+    LONGLONG width=0;
+
+    int rows_sent=0;
+
+    // use char* rather than void* for pointer arithmetic; arithmetic on
+    // void* is only a compiler extension
+    char* ptr=NULL;
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+    colptr = hdu->tableptr + (colnum-1);
+
+    repeat = colptr->trepeat;
+    width = colptr->tdatatype == TSTRING ? 1 : colptr->twidth;
+
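+    // if all rows were requested then, because the rows are unique and
+    // sorted, rows[irow] == irow and the rows array can be ignored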
+    rows_sent = nrows == hdu->numrows ? 0 : 1;
+
+    ptr = (char*) data;
+    for (irow=0; irow<nrows; irow++) {
+        if (rows_sent) {
+            row = rows[irow];
+        } else {
+            row = irow;
+        }
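+        // seek directly to the start of this column's field in the row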
+        file_pos = hdu->datastart + row*hdu->rowlength + colptr->tbcol;
+        ffmbyt(fits, file_pos, REPORT_EOF, status);
+        if (ffgbytoff(fits, width, repeat, 0, (void*)ptr, status)) {
+            return 1;
+        }
+        ptr += stride;
+    }
+
+    return 0;
+}
+
+
+
+
+/* 
+ * read from a column into an input array
+ */
+static PyObject *
+PyFITSObject_read_column(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+    int colnum=0;
+
+    FITSfile* hdu=NULL;
+    int status=0;
+
+    PyObject* array=NULL;
+
+    PyObject* rowsObj;
+
+    if (!PyArg_ParseTuple(args, (char*)"iiOO", &hdunum, &colnum, &array, &rowsObj)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // using struct defs here, could cause problems
+    hdu = self->fits->Fptr;
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot yet read columns from an IMAGE_HDU");
+        return NULL;
+    }
+    if (colnum < 1 || colnum > hdu->tfield) {
+        PyErr_SetString(PyExc_RuntimeError, "requested column is out of bounds");
+        return NULL;
+    }
+
+
+    if (hdutype == ASCII_TBL) {
+        if (read_ascii_column(self->fits, colnum, array, rowsObj, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    } else {
+        void* data=PyArray_DATA(array);
+        npy_intp nrows=0;
+        npy_int64* rows=NULL;
+        npy_intp stride=PyArray_STRIDE(array,0);
+        if (rowsObj == Py_None) {
+            nrows = hdu->numrows;
+        } else {
+            rows = get_int64_from_array(rowsObj, &nrows);
+            if (rows == NULL) {
+                return NULL;
+            }
+        }
+
+        if (read_binary_column(self->fits, colnum, nrows, rows, data, stride, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    }
+    Py_RETURN_NONE;
+}
+
+
+
+
+/*
+ * Free the python list along with the elements it holds.
+ *
+ * PyList_GetItem returns borrowed references, so decrefing each element
+ * here and then decrefing the list would release the elements twice;
+ * decrefing the list alone is enough, since deallocating the list drops
+ * its reference to every element
+ */
+static void free_all_python_list(PyObject* list) {
+    Py_XDECREF(list);
+}
+
+static PyObject*
+read_var_string(fitsfile* fits, int colnum, LONGLONG row, LONGLONG nchar, int* status) {
+    LONGLONG firstelem=1;
+    char* str=NULL;
+    char* strarr[1];
+    PyObject* stringObj=NULL;
+    void* nulval=0;
+    int* anynul=NULL;
+
+    str=calloc(nchar+1,sizeof(char));
+    if (str == NULL) {
+        PyErr_Format(PyExc_MemoryError, 
+                     "Could not allocate string of size %lld", nchar);
+        return NULL;
+    }
+
+    strarr[0] = str;
+    if (fits_read_col(fits,TSTRING,colnum,row,firstelem,nchar,nulval,strarr,anynul,status) > 0) {
+        goto read_var_string_cleanup;
+    }
+#if PY_MAJOR_VERSION >= 3
+    // bytes
+    stringObj = Py_BuildValue("y",str);
+#else
+    stringObj = Py_BuildValue("s",str);
+#endif
+    if (NULL == stringObj) {
+        PyErr_Format(PyExc_MemoryError, 
+                     "Could not allocate py string of size %lld", nchar);
+        goto read_var_string_cleanup;
+    }
+
+read_var_string_cleanup:
+    free(str);
+
+    return stringObj;
+}
+static PyObject*
+read_var_nums(fitsfile* fits, int colnum, LONGLONG row, LONGLONG nelem, 
+              int fits_dtype, int npy_dtype, int* status) {
+    LONGLONG firstelem=1;
+    PyObject* arrayObj=NULL;
+    void* nulval=0;
+    int* anynul=NULL;
+    npy_intp dims[1];
+    int fortran=0;
+    void* data=NULL;
+
+
+    dims[0] = nelem;
+    arrayObj=PyArray_ZEROS(1, dims, npy_dtype, fortran);
+    if (arrayObj==NULL) {
+        PyErr_Format(PyExc_MemoryError, 
+                     "Could not allocate array type %d size %lld",npy_dtype,nelem);
+        return NULL;
+    }
+    data = PyArray_DATA(arrayObj);
+    if (fits_read_col(fits,abs(fits_dtype),colnum,row,firstelem,nelem,nulval,data,anynul,status) > 0) {
+        Py_XDECREF(arrayObj);
+        return NULL;
+    }
+
+    return arrayObj;
+}
+/*
+ * read a variable length column as a list of arrays
+ * what about strings?
+ */
+static PyObject *
+PyFITSObject_read_var_column_as_list(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int colnum=0;
+    PyObject* rowsObj=NULL;
+
+    int hdutype=0;
+    int ncols=0;
+    const npy_int64* rows=NULL;
+    LONGLONG nrows=0;
+    int get_all_rows=0;
+
+    int status=0, tstatus=0;
+
+    int fits_dtype=0;
+    int npy_dtype=0;
+    int isvariable=0;
+    LONGLONG repeat=0;
+    LONGLONG width=0;
+    LONGLONG offset=0;
+    LONGLONG i=0;
+    LONGLONG row=0;
+
+    PyObject* listObj=NULL;
+    PyObject* tempObj=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"iiO", &hdunum, &colnum, &rowsObj)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot yet read columns from an IMAGE_HDU");
+        return NULL;
+    }
+    // using struct defs here, could cause problems
+    fits_get_num_cols(self->fits, &ncols, &status);
+    if (colnum < 1 || colnum > ncols) {
+        PyErr_SetString(PyExc_RuntimeError, "requested column is out of bounds");
+        return NULL;
+    }
+
+    if (fits_get_coltypell(self->fits, colnum, &fits_dtype, &repeat, &width, &status) > 0) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    npy_dtype = fits_to_npy_table_type(fits_dtype, &isvariable);
+    if (npy_dtype < 0) {
+        return NULL;
+    }
+    if (!isvariable) {
+        PyErr_Format(PyExc_TypeError,
+                     "Column %d (fits type %d) is not a variable length column",
+                     colnum, fits_dtype);
+        return NULL;
+    }
+    
+    if (rowsObj == Py_None) {
+        fits_get_num_rowsll(self->fits, &nrows, &tstatus);
+        get_all_rows=1;
+    } else {
+        npy_intp tnrows=0;
+        rows = (const npy_int64*) get_int64_from_array(rowsObj, &tnrows);
+        if (rows == NULL) {
+            return NULL;
+        }
+        nrows=(LONGLONG) tnrows;
+        get_all_rows=0;
+    }
+
+    listObj = PyList_New(0);
+
+    for (i=0; i<nrows; i++) {
+        tempObj=NULL;
+
+        if (get_all_rows) {
+            row = i+1;
+        } else {
+            row = (LONGLONG) (rows[i]+1);
+        }
+
+        // repeat holds how many elements are in this row
+        if (fits_read_descriptll(self->fits, colnum, row, &repeat, &offset, &status) > 0) {
+            goto read_var_column_cleanup;
+        }
+
+        if (fits_dtype == -TSTRING) {
+            tempObj = read_var_string(self->fits,colnum,row,repeat,&status);
+        } else {
+            tempObj = read_var_nums(self->fits,colnum,row,repeat,
+                                    fits_dtype,npy_dtype,&status);
+        }
+        if (tempObj == NULL) {
+            tstatus=1;
+            goto read_var_column_cleanup;
+        }
+        PyList_Append(listObj, tempObj);
+        Py_XDECREF(tempObj);
+    }
+
+
+read_var_column_cleanup:
+
+    if (status != 0 || tstatus != 0) {
+        Py_XDECREF(tempObj);
+        free_all_python_list(listObj);
+        if (status != 0) {
+            set_ioerr_string_from_status(status);
+        }
+        return NULL;
+    }
+
+    return listObj;
+}
+
+// read specified columns and rows
+static int read_binary_rec_columns(
+        fitsfile* fits, 
+        npy_intp ncols, npy_int64* colnums, 
+        npy_intp nrows, npy_int64* rows,
+        void* data, int* status) {
+    FITSfile* hdu=NULL;
+    tcolumn* colptr=NULL;
+    LONGLONG file_pos=0;
+    npy_intp col=0;
+    npy_int64 colnum=0;
+
+    int rows_sent=0;
+    npy_intp irow=0;
+    npy_int64 row=0;
+
+    // use char* rather than void* for pointer arithmetic; arithmetic on
+    // void* is only a compiler extension
+    char* ptr;
+
+    LONGLONG gsize=0; // number of bytes in column
+    LONGLONG repeat=0;
+    LONGLONG width=0;
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+
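+    // as in read_binary_column, a full-length rows array (unique and
+    // sorted) is the identity and can be ignored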
+    rows_sent = nrows == hdu->numrows ? 0 : 1;
+
+    ptr = (char*) data;
+    for (irow=0; irow<nrows; irow++) {
+        if (rows_sent) {
+            row = rows[irow];
+        } else {
+            row = irow;
+        }
+        for (col=0; col < ncols; col++) {
+
+            colnum = colnums[col];
+            colptr = hdu->tableptr + (colnum-1);
+
+            repeat = colptr->trepeat;
+            width = colptr->tdatatype == TSTRING ? 1 : colptr->twidth;
+            gsize = repeat*width;
+
+            file_pos = hdu->datastart + row*hdu->rowlength + colptr->tbcol;
+
+            if (colptr->tdatatype == TBIT) {
+                if (fits_read_col_bit(fits, colnum, row+1, 1, repeat, (char*)ptr, status)) {
+                    return 1;
+                }
+            } else {
+                // can just do one status check, since cfitsio inherits
+                // nonzero status values
+                ffmbyt(fits, file_pos, REPORT_EOF, status);
+                if (ffgbytoff(fits, width, repeat, 0, (void*)ptr, status)) {
+                    return 1;
+                }
+            }
+            ptr += gsize;
+        }
+    }
+
+    return 0;
+}
+
+
+
+// python method for reading specified columns and rows
+static PyObject *
+PyFITSObject_read_columns_as_rec(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+    npy_intp ncols=0;
+    npy_int64* colnums=NULL;
+    FITSfile* hdu=NULL;
+
+    int status=0;
+
+    PyObject* columnsobj=NULL;
+    PyObject* array=NULL;
+    void* data=NULL;
+
+    PyObject* rowsobj=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"iOOO", &hdunum, &columnsobj, &array, &rowsobj)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        goto recread_columns_cleanup;
+    }
+
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+        return NULL;
+    }
+
+    colnums = get_int64_from_array(columnsobj, &ncols);
+    if (colnums == NULL) {
+        return NULL;
+    }
+
+    hdu = self->fits->Fptr;
+    data = PyArray_DATA(array);
+    npy_intp nrows;
+    npy_int64* rows=NULL;
+    if (rowsobj == Py_None) {
+        nrows = hdu->numrows;
+    } else {
+        rows = get_int64_from_array(rowsobj, &nrows);
+        if (rows == NULL) {
+            return NULL;
+        }
+    }
+    if (read_binary_rec_columns(self->fits, ncols, colnums, nrows, rows, data, &status)) {
+        goto recread_columns_cleanup;
+    }
+
+recread_columns_cleanup:
+
+    if (status != 0) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+
+
+/* 
+ * read specified columns and rows
+ *
+ * Move by offset instead of just groupsize; this allows us to read into a
+ * recarray while skipping some fields, e.g. variable length array fields, to
+ * be read separately.
+ *
+ * If rows is NULL, then nrows are read consecutively.
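+ *
+ * For example, a variable length field that will be read separately is
+ * simply left out of colnums and field_offsets; the bytes for that field
+ * in each output record are skipped over.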
+ */
+
+static int read_columns_as_rec_byoffset(
+        fitsfile* fits, 
+        npy_intp ncols, 
+        const npy_int64* colnums,         // columns to read from file
+        const npy_int64* field_offsets,   // offsets of corresponding fields within array
+        npy_intp nrows, 
+        const npy_int64* rows,
+        char* data, 
+        npy_intp recsize, 
+        int* status) {
+
+    FITSfile* hdu=NULL;
+    tcolumn* colptr=NULL;
+    LONGLONG file_pos=0;
+    npy_intp col=0;
+    npy_int64 colnum=0;
+
+    char* ptr=NULL;
+
+    int get_all_rows=1;
+    npy_intp irow=0;
+    npy_int64 row=0;
+
+    long groupsize=0; // number of bytes in column
+    long ngroups=1; // number to read, one for row-by-row reading
+    long group_gap=0; // gap between groups, zero since we aren't using it
+
+    if (rows != NULL) {
+        get_all_rows=0;
+    }
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+    for (irow=0; irow<nrows; irow++) {
+        if (get_all_rows) {
+            row=irow;
+        } else {
+            row = rows[irow];
+        }
+        for (col=0; col < ncols; col++) {
+
+            // point to this field in the array, allows for skipping
+            ptr = data + irow*recsize + field_offsets[col];
+
+            colnum = colnums[col];
+            colptr = hdu->tableptr + (colnum-1);
+
+            groupsize = get_groupsize(colptr);
+
+            file_pos = hdu->datastart + row*hdu->rowlength + colptr->tbcol;
+
+            // can just do one status check, since cfitsio inherits
+            // nonzero status values
+            ffmbyt(fits, file_pos, REPORT_EOF, status);
+            if (ffgbytoff(fits, groupsize, ngroups, group_gap, (void*) ptr, status)) {
+                return 1;
+            }
+        }
+    }
+
+    return 0;
+}
+
+
+
+
+
+
+
+/* python method for reading specified columns and rows, moving by offset in
+ * the array to allow some fields not read.
+ *
+ * columnsObj is the columns in the fits file to read.
+ * offsetsObj is the offsets of the corresponding fields into the array.
+ */
+static PyObject *
+PyFITSObject_read_columns_as_rec_byoffset(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    npy_intp ncols=0;
+    npy_intp noffsets=0;
+    npy_intp nrows=0;
+    const npy_int64* colnums=NULL;
+    const npy_int64* offsets=NULL;
+    const npy_int64* rows=NULL;
+
+    PyObject* columnsObj=NULL;
+    PyObject* offsetsObj=NULL;
+    PyObject* rowsObj=NULL;
+
+    PyObject* array=NULL;
+    void* data=NULL;
+    npy_intp recsize=0;
+
+    if (!PyArg_ParseTuple(args, (char*)"iOOOO", &hdunum, &columnsObj, &offsetsObj, &array, &rowsObj)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        goto recread_columns_byoffset_cleanup;
+    }
+
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+        return NULL;
+    }
+    
+    colnums = (const npy_int64*) get_int64_from_array(columnsObj, &ncols);
+    if (colnums == NULL) {
+        return NULL;
+    }
+    offsets = (const npy_int64*) get_int64_from_array(offsetsObj, &noffsets);
+    if (offsets == NULL) {
+        return NULL;
+    }
+    if (noffsets != ncols) {
+        PyErr_Format(PyExc_ValueError, 
+                     "%ld columns requested but got %ld offsets", 
+                     ncols, noffsets);
+        return NULL;
+    }
+
+    if (rowsObj != Py_None) {
+        rows = (const npy_int64*) get_int64_from_array(rowsObj, &nrows);
+        if (rows == NULL) {
+            return NULL;
+        }
+    } else {
+        nrows = PyArray_SIZE(array);
+    }
+
+    data = PyArray_DATA(array);
+    recsize = PyArray_ITEMSIZE(array);
+    if (read_columns_as_rec_byoffset(
+                self->fits, 
+                ncols, colnums, offsets,
+                nrows, 
+                rows, 
+                (char*) data, 
+                recsize,
+                &status) > 0) {
+        goto recread_columns_byoffset_cleanup;
+    }
+
+recread_columns_byoffset_cleanup:
+
+    if (status != 0) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+
+// read specified rows, all columns
+static int read_rec_bytes_byrow(
+        fitsfile* fits, 
+        npy_intp nrows, npy_int64* rows,
+        void* data, int* status) {
+
+    FITSfile* hdu=NULL;
+
+    npy_intp irow=0;
+    LONGLONG firstrow=1;
+    LONGLONG firstchar=1;
+
+    // use char* rather than void* for pointer arithmetic; arithmetic on
+    // void* is only a compiler extension
+    unsigned char* ptr;
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+    ptr = (unsigned char*) data;
+
+    for (irow=0; irow<nrows; irow++) {
+        // Input is zero-offset
+        firstrow = 1 + (LONGLONG) rows[irow];
+
+        if (fits_read_tblbytes(fits, firstrow, firstchar, hdu->rowlength, ptr, status)) {
+            return 1;
+        }
+
+        ptr += hdu->rowlength;
+    }
+
+    return 0;
+}
+// read specified rows, all columns
+/*
+static int read_rec_bytes_byrowold(
+        fitsfile* fits, 
+        npy_intp nrows, npy_int64* rows,
+        void* data, int* status) {
+    FITSfile* hdu=NULL;
+    LONGLONG file_pos=0;
+
+    npy_intp irow=0;
+    npy_int64 row=0;
+
+    // use char for pointer arith.  It's actually ok to use void as char but
+    // this is just in case.
+    char* ptr;
+
+    long ngroups=1; // number to read, one for row-by-row reading
+    long offset=0; // gap between groups, not stride.  zero since we aren't using it
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+    ptr = (char*) data;
+
+    for (irow=0; irow<nrows; irow++) {
+        row = rows[irow];
+        file_pos = hdu->datastart + row*hdu->rowlength;
+
+        // can just do one status check, since status are inherited.
+        ffmbyt(fits, file_pos, REPORT_EOF, status);
+        if (ffgbytoff(fits, hdu->rowlength, ngroups, offset, (void*) ptr, status)) {
+            return 1;
+        }
+        ptr += hdu->rowlength;
+    }
+
+    return 0;
+}
+*/
+
+
+// python method to read all columns but subset of rows
+static PyObject *
+PyFITSObject_read_rows_as_rec(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+
+    int status=0;
+    PyObject* array=NULL;
+    void* data=NULL;
+
+    PyObject* rowsObj=NULL;
+    npy_intp nrows=0;
+    npy_int64* rows=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"iOO", &hdunum, &array, &rowsObj)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        goto recread_byrow_cleanup;
+    }
+
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+        return NULL;
+    }
+
+    data = PyArray_DATA(array);
+
+    rows = get_int64_from_array(rowsObj, &nrows);
+    if (rows == NULL) {
+        return NULL;
+    }
+    if (read_rec_bytes_byrow(self->fits, nrows, rows, data, &status)) {
+        goto recread_byrow_cleanup;
+    }
+
+recread_byrow_cleanup:
+
+    if (status != 0) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+
+
+/*
+ * Read the range of rows, 1-offset.  It is assumed the data match the
+ * table perfectly.
+ */
+
+static int read_rec_range(fitsfile* fits, LONGLONG firstrow, LONGLONG nrows, void* data, int* status) {
+    // can also use this for reading row ranges
+    LONGLONG firstchar=1;
+    LONGLONG nchars=0;
+
+    nchars = (fits->Fptr)->rowlength*nrows;
+
+    if (fits_read_tblbytes(fits, firstrow, firstchar, nchars, (unsigned char*) data, status)) {
+        return 1;
+    }
+
+    return 0;
+}
+
+
+
+
+/* here rows are 1-offset, unlike when reading a specific subset of rows */
+static PyObject *
+PyFITSObject_read_as_rec(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+
+    int status=0;
+    PyObject* array=NULL;
+    void* data=NULL;
+
+    PY_LONG_LONG firstrow=0;
+    PY_LONG_LONG lastrow=0;
+    PY_LONG_LONG nrows=0;
+
+    if (!PyArg_ParseTuple(args, (char*)"iLLO", &hdunum, &firstrow, &lastrow, &array)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        goto recread_asrec_cleanup;
+    }
+
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+        return NULL;
+    }
+
+    data = PyArray_DATA(array);
+
+    nrows=lastrow-firstrow+1;
+    if (read_rec_range(self->fits, (LONGLONG)firstrow, (LONGLONG)nrows, data, &status)) {
+        goto recread_asrec_cleanup;
+    }
+
+recread_asrec_cleanup:
+
+    if (status != 0) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+// read an n-dimensional "image" into the input array.  Only minimal checking
+// of the input array is done.
+// Note numpy allows a maximum of 32 dimensions
+static PyObject *
+PyFITSObject_read_image(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+    int status=0;
+    PyObject* array=NULL;
+    void* data=NULL;
+    int npy_dtype=0;
+    int dummy=0, fits_read_dtype=0;
+
+    int maxdim=NUMPY_MAX_DIMS; // numpy maximum
+    int datatype=0; // type info for axis
+    int naxis=0; // number of axes
+    int i=0;
+    LONGLONG naxes[NUMPY_MAX_DIMS];  // size of each axis
+    LONGLONG firstpixels[NUMPY_MAX_DIMS];
+    LONGLONG size=0;
+    npy_intp arrsize=0;
+
+    int anynul=0;
+
+    if (!PyArg_ParseTuple(args, (char*)"iO", &hdunum, &array)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        return NULL;
+    }
+
+    if (fits_get_img_paramll(self->fits, maxdim, &datatype, &naxis, 
+                             naxes, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // make sure the input array size matches the on-disk image size
+    size = naxes[0];
+    for (i=1; i< naxis; i++) {
+        size *= naxes[i];
+    }
+    arrsize = PyArray_SIZE(array);
+    data = PyArray_DATA(array);
+
+    if (size != arrsize) {
+        PyErr_Format(PyExc_RuntimeError,
+          "Input array size is %ld but on disk array size is %lld", 
+          arrsize, size);
+        return NULL;
+    }
+
+    npy_dtype = PyArray_TYPE(array);
+    npy_to_fits_image_types(npy_dtype, &dummy, &fits_read_dtype);
+
+    for (i=0; i<naxis; i++) {
+        firstpixels[i] = 1;
+    }
+    if (fits_read_pixll(self->fits, fits_read_dtype, firstpixels, size,
+                        0, data, &anynul, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+
+    Py_RETURN_NONE;
+}
+
+static PyObject *
+PyFITSObject_read_raw(struct PyFITSObject* self, PyObject* args) {
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    //fitsfile* fits = self->fits;
+    FITSfile* FITS = self->fits->Fptr;
+    int status = 0;
+    char* filedata;
+    LONGLONG sz;
+    LONGLONG io_pos;
+    PyObject *stringobj;
+
+    // Flush (close & reopen HDU) to make everything consistent
+    ffflus(self->fits, &status);
+    if (status) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to flush FITS file data to disk; CFITSIO code %i",
+                     status);
+        return NULL;
+    }
+    // Allocate buffer for string
+    sz = FITS->filesize;
+    // Create python string object of requested size, uninitialized
+    stringobj = PyBytes_FromStringAndSize(NULL, sz);
+    if (!stringobj) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to allocate python string object to hold FITS file data: %i bytes",
+                     (int)sz);
+        return NULL;
+    }
+    // Grab pointer to the memory buffer of the python string object
+    filedata = PyBytes_AsString(stringobj);
+    if (!filedata) {
+        Py_DECREF(stringobj);
+        return NULL;
+    }
+    // Remember old file position
+    io_pos = FITS->io_pos;
+    // Seek to beginning of file
+    if (ffseek(FITS, 0)) {
+        Py_DECREF(stringobj);
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to seek to beginning of FITS file");
+        return NULL;
+    }
+    // Read into filedata
+    if (ffread(FITS, sz, filedata, &status)) {
+        Py_DECREF(stringobj);
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to read file data into memory: CFITSIO code %i",
+                     status);
+        return NULL;
+    }
+    // Seek back to where we were
+    if (ffseek(FITS, io_pos)) {
+        Py_DECREF(stringobj);
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to seek back to original FITS file position");
+        return NULL;
+    }
+    return stringobj;
+}
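+
+/* Illustrative use from python (a hedged sketch, not part of the source):
+ * the FITS object exposes this as read_raw (see the method table below),
+ * returning the entire on-disk file contents as a bytes object:
+ *
+ *     data = fits._FITS.read_raw()
+ */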
+
+static int get_long_slices(PyObject* fpix_arr,
+                           PyObject* lpix_arr,
+                           PyObject* step_arr,
+                           long** fpix,
+                           long** lpix,
+                           long** step) {
+
+    int i=0;
+    npy_int64* ptr=NULL;
+    npy_intp fsize=0, lsize=0, ssize=0;
+
+    fsize=PyArray_SIZE(fpix_arr);
+    lsize=PyArray_SIZE(lpix_arr);
+    ssize=PyArray_SIZE(step_arr);
+
+    if (lsize != fsize || ssize != fsize) {
+        PyErr_SetString(PyExc_RuntimeError, 
+                        "start/end/step must be same len");
+        return 1;
+    }
+
+    *fpix=calloc(fsize, sizeof(long));
+    *lpix=calloc(fsize, sizeof(long));
+    *step=calloc(fsize, sizeof(long));
+
+    for (i=0;i<fsize;i++) {
+        ptr=PyArray_GETPTR1(fpix_arr, i);
+        (*fpix)[i] = *ptr;
+        ptr=PyArray_GETPTR1(lpix_arr, i);
+        (*lpix)[i] = *ptr;
+        ptr=PyArray_GETPTR1(step_arr, i);
+        (*step)[i] = *ptr;
+    }
+    return 0;
+}
+
+// read an n-dimensional "image" into the input array.  Only minimal checking
+// of the input array is done.
+static PyObject *
+PyFITSObject_read_image_slice(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+    int status=0;
+    PyObject* fpix_arr=NULL;
+    PyObject* lpix_arr=NULL;
+    PyObject* step_arr=NULL;
+    int ignore_scaling=FALSE;
+    PyObject* array=NULL;
+    long* fpix=NULL;
+    long* lpix=NULL;
+    long* step=NULL;
+    void* data=NULL;
+    int npy_dtype=0;
+    int dummy=0, fits_read_dtype=0;
+
+    int anynul=0;
+
+    if (!PyArg_ParseTuple(args, (char*)"iOOOiO",
+                &hdunum, &fpix_arr, &lpix_arr, &step_arr, &ignore_scaling,
+                &array)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (ignore_scaling == TRUE
+        && fits_set_bscale(self->fits, 1.0, 0.0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (get_long_slices(fpix_arr,lpix_arr,step_arr,
+                        &fpix,&lpix,&step)) {
+        return NULL;
+    }
+    data = PyArray_DATA(array);
+
+    npy_dtype = PyArray_TYPE(array);
+    npy_to_fits_image_types(npy_dtype, &dummy, &fits_read_dtype);
+
+    if (fits_read_subset(self->fits, fits_read_dtype, fpix, lpix, step,
+                         0, data, &anynul, &status)) {
+        set_ioerr_string_from_status(status);
+        goto read_image_slice_cleanup;
+    }
+
+read_image_slice_cleanup:
+    free(fpix);
+    free(lpix);
+    free(step);
+
+    if (status != 0) {
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+
+static int hierarch_is_string(const char* card)
+{
+    int i=0, is_string_value=1;
+
+    for (i=0; i<78; i++) {
+        if (card[i] == '=') {
+            // we found the equals; if the value is a string we
+            // know exactly where the quote must be
+            if (card[i+2] == '\'') {
+                is_string_value = 1;
+            } else  {
+                is_string_value = 0;
+            }
+        }
+    }
+    return is_string_value;
+}
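+
+/* Illustrative example with a hypothetical card: in
+ *
+ *     HIERARCH ESO DET CHIP = 'CCD-20' / detector
+ *
+ * the quote sits two characters after the '=', so the value is classified
+ * as a string, while a numeric card such as
+ *
+ *     HIERARCH ESO DET GAIN = 2.5 / gain
+ *
+ * has no quote in that position and is classified as non-string.
+ */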
+
+// read the entire header as list of dicts with name,value,comment and full
+// card
+static PyObject *
+PyFITSObject_read_header(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+    int lcont=0, lcomm=0, ls=0;
+    int tocomp=0;
+    int is_comment_or_history=0, is_blank_key=0;
+    char *longstr=NULL;
+
+    char keyname[FLEN_KEYWORD];
+    char value[FLEN_VALUE];
+    char comment[FLEN_COMMENT];
+    char scomment[FLEN_COMMENT];
+    char card[FLEN_CARD];
+    long is_string_value=0;
+
+    LONGLONG lval=0;
+    double dval=0;
+
+    int nkeys=0, morekeys=0, i=0;
+    int has_equals=0, has_quote=0;
+
+    PyObject* list=NULL;
+    PyObject* dict=NULL;  // to hold the dict for each record
+
+    lcont=strlen("CONTINUE");
+    lcomm=strlen("COMMENT");
+
+    if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_get_hdrspace(self->fits, &nkeys, &morekeys, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    list=PyList_New(0);
+    for (i=0; i<nkeys; i++) {
+
+        // the full card
+        if (fits_read_record(self->fits, i+1, card, &status)) {
+            Py_XDECREF(list);
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+
+        // this just returns the character string stored in the header; we
+        // can eval in python
+        if (fits_read_keyn(self->fits, i+1, keyname, value, scomment, &status)) {
+            Py_XDECREF(list);
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+
+        ls = strlen(keyname);
+        tocomp = (ls < lcont) ? ls : lcont;
+
+        is_blank_key = 0;
+        if (ls == 0) {
+            is_blank_key = 1;
+        } else {
+
+            // skip CONTINUE, we already read the data
+            if (strncmp(keyname,"CONTINUE",tocomp)==0) {
+                continue;
+            }
+
+            if (strncmp(keyname, "COMMENT", tocomp) ==0
+                    || strncmp(keyname, "HISTORY", tocomp )==0) {
+                is_comment_or_history = 1;
+
+            } else {
+                is_comment_or_history = 0;
+
+                if (fits_read_key_longstr(self->fits, keyname, &longstr, comment, &status)) {
+                    Py_XDECREF(list);
+                    set_ioerr_string_from_status(status);
+                    return NULL;
+                }
+
+                if (strncmp(card,"HIERARCH",8)==0) {
+                    if (hierarch_is_string(card)) {
+                        is_string_value=1;
+                    } else {
+                        is_string_value=0;
+                    }
+                } else {
+                    has_equals = (card[8] == '=') ? 1 : 0;
+                    has_quote = (card[10] == '\'') ? 1 : 0;
+                    if (has_equals && has_quote) {
+                        is_string_value=1;
+                    } else {
+                        is_string_value=0;
+                    }
+                }
+            }
+        }
+
+        dict = PyDict_New();
+
+        add_string_to_dict(dict, "card_string", card);
+
+        if (is_blank_key) {
+            add_none_to_dict(dict, "name");
+            add_string_to_dict(dict, "value", "");
+            convert_to_ascii(scomment);
+            add_string_to_dict(dict, "comment", scomment);
+
+        } else if (is_comment_or_history) {
+            // comment or history
+            convert_to_ascii(scomment);
+            add_string_to_dict(dict, "name", keyname);
+            add_string_to_dict(dict, "value", scomment);
+            add_string_to_dict(dict, "comment", scomment);
+
+        } else {
+            convert_keyword_to_allowed_ascii(keyname);
+            add_string_to_dict(dict,"name",keyname);
+            convert_to_ascii(comment);
+            add_string_to_dict(dict,"comment",comment);
+
+            // if not a comment but empty value, put in None
+            tocomp = (ls < lcomm) ? ls : lcomm;
+            // if (!is_string_value && 0==strlen(longstr) && !is_comment) {
+            if (!is_string_value && 0==strlen(longstr)) {
+
+                add_none_to_dict(dict, "value");
+
+            } else {
+
+                if (is_string_value) {
+                    convert_to_ascii(longstr);
+                    add_string_to_dict(dict,"value",longstr);
+                } else if ( longstr[0]=='T' ) {
+                    add_true_to_dict(dict, "value");
+                } else if (longstr[0]=='F') {
+                    add_false_to_dict(dict, "value");
+                } else if ( 
+                           (strchr(longstr,'.') != NULL)
+                           || (strchr(longstr,'E') != NULL)
+                           || (strchr(longstr,'e') != NULL) ) {
+                    // we found a floating point value
+                    fits_read_key(self->fits, TDOUBLE, keyname, &dval, comment, &status);
+                    add_double_to_dict(dict,"value",dval);
+                } else {
+
+                    // we might have found an integer
+                    if (fits_read_key(self->fits,
+                                      TLONGLONG,
+                                      keyname,
+                                      &lval,
+                                      comment,
+                                      &status)) {
+
+                        // something non standard, just store it as a string
+                        convert_to_ascii(longstr);
+                        add_string_to_dict(dict,"value",longstr);
+                        status=0;
+
+                    } else {
+                        add_long_long_to_dict(dict,"value",(long long)lval);
+                    }
+                }
+
+            }
+        }
+
+        free(longstr); longstr=NULL;
+
+        PyList_Append(list, dict);
+        Py_XDECREF(dict);
+
+    }
+
+    return list;
+}
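+
+/* Illustrative shape of the list returned above (one dict per card; the
+ * key name and values here are hypothetical):
+ *
+ *     [{'name': 'EXPTIME', 'value': 30.0, 'comment': 'exposure time',
+ *       'card_string': 'EXPTIME =                 30.0 / exposure time'},
+ *      ...]
+ */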
+static PyObject *
+PyFITSObject_write_checksum(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    unsigned long datasum=0;
+    unsigned long hdusum=0;
+
+    PyObject* dict=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_write_chksum(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    if (fits_get_chksum(self->fits, &datasum, &hdusum, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    dict=PyDict_New();
+    add_long_long_to_dict(dict,"datasum",(long long)datasum);
+    add_long_long_to_dict(dict,"hdusum",(long long)hdusum);
+
+    return dict;
+}
+static PyObject *
+PyFITSObject_verify_checksum(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    int dataok=0, hduok=0;
+
+    PyObject* dict=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_verify_chksum(self->fits, &dataok, &hduok, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    dict=PyDict_New();
+    add_long_to_dict(dict,"dataok",(long)dataok);
+    add_long_to_dict(dict,"hduok",(long)hduok);
+
+    return dict;
+}
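+
+/* Illustrative return values: write_checksum returns a dict of the form
+ * {'datasum': ..., 'hdusum': ...}, while verify_checksum returns
+ * {'dataok': 1, 'hduok': 1} when both checksums verify. */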
+
+
+
+static PyObject *
+PyFITSObject_where(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+    char* expression=NULL;
+
+    LONGLONG nrows=0;
+
+    long firstrow=1;
+    long ngood=0;
+    char* row_status=NULL;
+
+
+    // Indices of rows for which expression is true
+    PyObject* indicesObj=NULL;
+    int ndim=1;
+    npy_intp dims[1];
+    npy_intp* data=NULL;
+    long i=0;
+
+
+    if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &expression)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_get_num_rowsll(self->fits, &nrows, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    row_status = malloc(nrows*sizeof(char));
+    if (row_status==NULL) {
+        PyErr_SetString(PyExc_MemoryError, "Could not allocate row_status array");
+        return NULL;
+    }
+
+    if (fits_find_rows(self->fits, expression, firstrow, (long) nrows, &ngood, row_status, &status)) {
+        set_ioerr_string_from_status(status);
+        goto where_function_cleanup;
+    }
+
+    dims[0] = ngood;
+    indicesObj = PyArray_EMPTY(ndim, dims, NPY_INTP, 0);
+    if (indicesObj == NULL) {
+        PyErr_SetString(PyExc_MemoryError, "Could not allocate index array");
+        goto where_function_cleanup;
+    }
+
+    if (ngood > 0) {
+        data = PyArray_DATA(indicesObj);
+
+        for (i=0; i<nrows; i++) {
+            if (row_status[i]) {
+                *data = (npy_intp) i;
+                data++;
+            }
+        }
+    }
+where_function_cleanup:
+    free(row_status);
+    return indicesObj;
+}
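+
+/* Illustrative use from python (the column names are hypothetical); the
+ * expression uses cfitsio's row-filtering syntax:
+ *
+ *     indices = fits._FITS.where(hdunum, "mag < 20.0 && flags == 0")
+ */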
+
+// generic functions, not tied to an object
+
+static PyObject *
+PyFITS_cfitsio_version(void) {
+    float version=0;
+    fits_get_version(&version);
+    return PyFloat_FromDouble((double)version);
+}
+
+static PyObject *
+PyFITS_cfitsio_use_standard_strings(void) {
+    if ( fits_use_standard_strings() ) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+
+/*
+
+key type codes returned by fits_get_keytype:
+
+'C',              'L',     'I',     'F',            'X'
+character string, logical, integer, floating point, complex
+
+*/
+
+static PyObject *
+PyFITS_get_keytype(PyObject* self, PyObject* args) {
+
+    int status=0;
+    char* card=NULL;
+    char dtype[2]={0};
+
+    if (!PyArg_ParseTuple(args, (char*)"s", &card)) {
+        return NULL;
+    }
+
+
+    if (fits_get_keytype(card, dtype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    } else {
+        return Py_BuildValue("s", dtype);
+    }
+}
+static PyObject *
+PyFITS_get_key_meta(PyObject* self, PyObject* args) {
+
+    int status=0;
+    char* card=NULL;
+    char dtype[2]={0};
+    int keyclass=0;
+
+    if (!PyArg_ParseTuple(args, (char*)"s", &card)) {
+        return NULL;
+    }
+
+
+    keyclass=fits_get_keyclass(card);
+
+    if (fits_get_keytype(card, dtype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    return Py_BuildValue("is", keyclass, dtype);
+
+}
+
+/*
+
+    note the special first four comment fields are classed not as comment
+    keys but as structural keys!  That will cause an exception to be raised,
+    so the card should be checked before calling this function
+
+*/
+
+static PyObject *
+PyFITS_parse_card(PyObject* self, PyObject* args) {
+
+    int status=0;
+    char name[FLEN_VALUE]={0};
+    char value[FLEN_VALUE]={0};
+    char comment[FLEN_COMMENT]={0};
+    int keylen=0;
+    int keyclass=0;
+    int is_undefined=0;
+
+    char* card=NULL;
+    char dtype[2]={0};
+    PyObject* output=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"s", &card)) {
+        goto bail;
+    }
+
+    keyclass=fits_get_keyclass(card);
+
+    // only proceed if not comment or history, but note the special first four
+    // comment fields will not be called comment but structural!  That will
+    // cause an exception to be raised, so the card should be checked before
+    // calling this function
+
+    if (keyclass != TYP_COMM_KEY && keyclass != TYP_CONT_KEY) {
+
+        if (fits_get_keyname(card, name, &keylen, &status)) {
+            set_ioerr_string_from_status(status);
+            goto bail;
+        }
+        if (fits_parse_value(card, value, comment, &status)) {
+            set_ioerr_string_from_status(status);
+            goto bail;
+        }
+        if (fits_get_keytype(value, dtype, &status)) {
+
+            if (status == VALUE_UNDEFINED) {
+                is_undefined=1;
+                status=0;
+            } else {
+                set_ioerr_string_from_status(status);
+                goto bail;
+            }
+        }
+    }
+
+bail:
+    if (status != 0) {
+        return NULL;
+    }
+
+    if (is_undefined) {
+        output=Py_BuildValue("isss", keyclass, name, dtype, comment);
+    } else {
+        output=Py_BuildValue("issss", keyclass, name, value, dtype, comment);
+    }
+    return output;
+}
+
+
+
+static PyMethodDef PyFITSObject_methods[] = {
+    {"filename",             (PyCFunction)PyFITSObject_filename,             METH_VARARGS,  "filename\n\nReturn the name of the file."},
+
+    {"where",                (PyCFunction)PyFITSObject_where,                METH_VARARGS,  "where\n\nReturn an index array where the input expression evaluates to true."},
+
+    {"movabs_hdu",           (PyCFunction)PyFITSObject_movabs_hdu,           METH_VARARGS,  "movabs_hdu\n\nMove to the specified HDU."},
+    {"movnam_hdu",           (PyCFunction)PyFITSObject_movnam_hdu,           METH_VARARGS,  "movnam_hdu\n\nMove to the specified HDU by name and return the hdu number."},
+
+    {"get_hdu_name_version",         (PyCFunction)PyFITSObject_get_hdu_name_version,         METH_VARARGS,  "get_hdu_name_version\n\nReturn a tuple (extname,extvers)."},
+    {"get_hdu_info",         (PyCFunction)PyFITSObject_get_hdu_info,         METH_VARARGS,  "get_hdu_info\n\nReturn a dict with info about the specified HDU."},
+    {"read_raw",             (PyCFunction)PyFITSObject_read_raw,             METH_NOARGS,  "read_raw\n\nRead the entire raw contents of the FITS file, returning a python string."},
+    {"read_image",           (PyCFunction)PyFITSObject_read_image,           METH_VARARGS,  "read_image\n\nRead the entire n-dimensional image array.  No checking of array is done."},
+    {"read_image_slice",     (PyCFunction)PyFITSObject_read_image_slice,     METH_VARARGS,  "read_image_slice\n\nRead an image slice."},
+    {"read_column",          (PyCFunction)PyFITSObject_read_column,          METH_VARARGS,  "read_column\n\nRead the column into the input array.  No checking of array is done."},
+    {"read_var_column_as_list",          (PyCFunction)PyFITSObject_read_var_column_as_list,          METH_VARARGS,  "read_var_column_as_list\n\nRead the variable length column as a list of arrays."},
+    {"read_columns_as_rec",  (PyCFunction)PyFITSObject_read_columns_as_rec,  METH_VARARGS,  "read_columns_as_rec\n\nRead the specified columns into the input rec array.  No checking of array is done."},
+    {"read_columns_as_rec_byoffset",  (PyCFunction)PyFITSObject_read_columns_as_rec_byoffset,  METH_VARARGS,  "read_columns_as_rec_byoffset\n\nRead the specified columns into the input rec array at the specified offsets.  No checking of array is done."},
+    {"read_rows_as_rec",     (PyCFunction)PyFITSObject_read_rows_as_rec,     METH_VARARGS,  "read_rows_as_rec\n\nRead the subset of rows into the input rec array.  No checking of array is done."},
+    {"read_as_rec",          (PyCFunction)PyFITSObject_read_as_rec,          METH_VARARGS,  "read_as_rec\n\nRead a set of rows into the input rec array.  No significant checking of array is done."},
+    {"read_header",          (PyCFunction)PyFITSObject_read_header,          METH_VARARGS | METH_VARARGS,  "read_header\n\nRead the entire header as a list of dictionaries."},
+
+    {"create_image_hdu",     (PyCFunction)PyFITSObject_create_image_hdu,     METH_VARARGS | METH_KEYWORDS, "create_image_hdu\n\nWrite the input image to a new extension."},
+    {"create_table_hdu",     (PyCFunction)PyFITSObject_create_table_hdu,     METH_VARARGS | METH_KEYWORDS, "create_table_hdu\n\nCreate a new table with the input parameters."},
+    {"insert_col",           (PyCFunction)PyFITSObject_insert_col,           METH_VARARGS | METH_KEYWORDS, "insert_col\n\nInsert a new column."},
+
+    {"write_checksum",       (PyCFunction)PyFITSObject_write_checksum,       METH_VARARGS,  "write_checksum\n\nCompute and write the checksums into the header."},
+    {"verify_checksum",      (PyCFunction)PyFITSObject_verify_checksum,      METH_VARARGS,  "verify_checksum\n\nReturn a dict with dataok and hduok."},
+
+    {"reshape_image",          (PyCFunction)PyFITSObject_reshape_image,          METH_VARARGS,  "reshape_image\n\nReshape the image."},
+    {"write_image",          (PyCFunction)PyFITSObject_write_image,          METH_VARARGS,  "write_image\n\nWrite the input image to a new extension."},
+    //{"write_column",         (PyCFunction)PyFITSObject_write_column,         METH_VARARGS | METH_KEYWORDS, "write_column\n\nWrite a column into the specified hdu."},
+    {"write_columns",        (PyCFunction)PyFITSObject_write_columns,        METH_VARARGS | METH_KEYWORDS, "write_columns\n\nWrite columns into the specified hdu."},
+    {"write_var_column",     (PyCFunction)PyFITSObject_write_var_column,     METH_VARARGS | METH_KEYWORDS, "write_var_column\n\nWrite a variable length column into the specified hdu from an object array."},
+    {"write_record",         (PyCFunction)PyFITSObject_write_record,     METH_VARARGS,  "write_record\n\nWrite a header card."},
+    {"write_string_key",     (PyCFunction)PyFITSObject_write_string_key,     METH_VARARGS,  "write_string_key\n\nWrite a string key into the specified HDU."},
+    {"write_double_key",     (PyCFunction)PyFITSObject_write_double_key,     METH_VARARGS,  "write_double_key\n\nWrite a double key into the specified HDU."},
+
+    {"write_long_long_key",       (PyCFunction)PyFITSObject_write_long_long_key,       METH_VARARGS,  "write_long_long_key\n\nWrite a long long key into the specified HDU."},
+    {"write_logical_key",    (PyCFunction)PyFITSObject_write_logical_key,    METH_VARARGS,  "write_logical_key\n\nWrite a logical key into the specified HDU."},
+
+    {"write_comment",        (PyCFunction)PyFITSObject_write_comment,        METH_VARARGS,  "write_comment\n\nWrite a comment into the header of the specified HDU."},
+    {"write_history",        (PyCFunction)PyFITSObject_write_history,        METH_VARARGS,  "write_history\n\nWrite history into the header of the specified HDU."},
+    {"write_continue",       (PyCFunction)PyFITSObject_write_continue,        METH_VARARGS,  "write_continue\n\nWrite contineu into the header of the specified HDU."},
+
+    {"write_undefined_key",       (PyCFunction)PyFITSObject_write_undefined_key,        METH_VARARGS,  "write_undefined_key\n\nWrite a key without a value field into the header of the specified HDU."},
+
+    {"insert_rows",        (PyCFunction)PyFITSObject_insert_rows,        METH_VARARGS,  "Insert blank rows"},
+
+    {"delete_row_range",        (PyCFunction)PyFITSObject_delete_row_range,        METH_VARARGS,  "Delete a range of rows"},
+    {"delete_rows",        (PyCFunction)PyFITSObject_delete_rows,        METH_VARARGS,  "Delete a set of rows"},
+
+    {"close",                (PyCFunction)PyFITSObject_close,                METH_VARARGS,  "close\n\nClose the fits file."},
+    {NULL}  /* Sentinel */
+};
+
+static PyTypeObject PyFITSType = {
+#if PY_MAJOR_VERSION >= 3
+    PyVarObject_HEAD_INIT(NULL, 0)
+#else
+    PyObject_HEAD_INIT(NULL)
+    0,                         /*ob_size*/
+#endif
+    "_fitsio.FITS",             /*tp_name*/
+    sizeof(struct PyFITSObject), /*tp_basicsize*/
+    0,                         /*tp_itemsize*/
+    (destructor)PyFITSObject_dealloc, /*tp_dealloc*/
+    0,                         /*tp_print*/
+    0,                         /*tp_getattr*/
+    0,                         /*tp_setattr*/
+    0,                         /*tp_compare*/
+    //0,                         /*tp_repr*/
+    (reprfunc)PyFITSObject_repr,                         /*tp_repr*/
+    0,                         /*tp_as_number*/
+    0,                         /*tp_as_sequence*/
+    0,                         /*tp_as_mapping*/
+    0,                         /*tp_hash */
+    0,                         /*tp_call*/
+    0,                         /*tp_str*/
+    0,                         /*tp_getattro*/
+    0,                         /*tp_setattro*/
+    0,                         /*tp_as_buffer*/
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
+    "FITSIO Class",           /* tp_doc */
+    0,                     /* tp_traverse */
+    0,                     /* tp_clear */
+    0,                     /* tp_richcompare */
+    0,                     /* tp_weaklistoffset */
+    0,                     /* tp_iter */
+    0,                     /* tp_iternext */
+    PyFITSObject_methods,             /* tp_methods */
+    0,             /* tp_members */
+    0,                         /* tp_getset */
+    0,                         /* tp_base */
+    0,                         /* tp_dict */
+    0,                         /* tp_descr_get */
+    0,                         /* tp_descr_set */
+    0,                         /* tp_dictoffset */
+    //0,     /* tp_init */
+    (initproc)PyFITSObject_init,      /* tp_init */
+    0,                         /* tp_alloc */
+    //PyFITSObject_new,                 /* tp_new */
+    PyType_GenericNew,                 /* tp_new */
+};
+
+
+static PyMethodDef fitstype_methods[] = {
+    {"cfitsio_version",      (PyCFunction)PyFITS_cfitsio_version,      METH_NOARGS,  "cfitsio_version\n\nReturn the cfitsio version."},
+    {"cfitsio_use_standard_strings",      (PyCFunction)PyFITS_cfitsio_use_standard_strings,      METH_NOARGS,  "cfitsio_use_standard_strings\n\nReturn True if using string code that matches the FITS standard."},
+    {"parse_card",      (PyCFunction)PyFITS_parse_card,      METH_VARARGS,  "parse_card\n\nparse the card to get the key name, value (as a string), data type and comment."},
+    {"get_keytype",      (PyCFunction)PyFITS_get_keytype,      METH_VARARGS,  "get_keytype\n\nparse the card to get the key type."},
+    {"get_key_meta",      (PyCFunction)PyFITS_get_key_meta,      METH_VARARGS,  "get_key_meta\n\nparse the card to get key metadata (keyclass,dtype)."},
+    {NULL}  /* Sentinel */
+};
+
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "_fitsio_wrap",      /* m_name */
+        "Defines the FITS class and some methods",  /* m_doc */
+        -1,                  /* m_size */
+        fitstype_methods,    /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+#endif
+
+
+#ifndef PyMODINIT_FUNC  /* declarations for DLL import/export */
+#define PyMODINIT_FUNC void
+#endif
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+PyInit__fitsio_wrap(void) 
+#else
+init_fitsio_wrap(void) 
+#endif
+{
+    PyObject* m;
+
+    PyFITSType.tp_new = PyType_GenericNew;
+
+#if PY_MAJOR_VERSION >= 3
+    if (PyType_Ready(&PyFITSType) < 0) {
+        return NULL;
+    }
+    m = PyModule_Create(&moduledef);
+    if (m==NULL) {
+        return NULL;
+    }
+
+#else
+    if (PyType_Ready(&PyFITSType) < 0) {
+        return;
+    }
+    m = Py_InitModule3("_fitsio_wrap", fitstype_methods, "Define FITS type and methods.");
+    if (m==NULL) {
+        return;
+    }
+#endif
+
+    Py_INCREF(&PyFITSType);
+    PyModule_AddObject(m, "FITS", (PyObject *)&PyFITSType);
+
+    import_array();
+#if PY_MAJOR_VERSION >= 3
+    return m;
+#endif
+}
diff --git a/fitsio/fitslib.py b/fitsio/fitslib.py
new file mode 100644 (file)
index 0000000..2211d4a
--- /dev/null
@@ -0,0 +1,1838 @@
+"""
+fitslib, part of the fitsio package.
+
+See the main docs at https://github.com/esheldon/fitsio
+
+  Copyright (C) 2011  Erin Sheldon, BNL.  erin dot sheldon at gmail dot com
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""
+from __future__ import with_statement, print_function
+import os
+import numpy
+
+from . import _fitsio_wrap
+from .util import IS_PY3, mks, array_to_native, isstring
+from .header import FITSHDR
+from .hdu import (
+    ANY_HDU, IMAGE_HDU, BINARY_TBL, ASCII_TBL,
+    ImageHDU, AsciiTableHDU, TableHDU,
+    _table_npy2fits_form, _npy2fits, _hdu_type_map)
+
+# for python3 compat
+if IS_PY3:
+    xrange = range
+
+
+READONLY = 0
+READWRITE = 1
+
+NOCOMPRESS = 0
+RICE_1 = 11
+GZIP_1 = 21
+GZIP_2 = 22
+PLIO_1 = 31
+HCOMPRESS_1 = 41
+
+NO_DITHER = -1
+SUBTRACTIVE_DITHER_1 = 1
+SUBTRACTIVE_DITHER_2 = 2
+
+# defaults follow fpack
+DEFAULT_QLEVEL = 4.0
+DEFAULT_QMETHOD = 'SUBTRACTIVE_DITHER_1'
+DEFAULT_HCOMP_SCALE = 0.0
+
+
+def read(filename, ext=None, extver=None, columns=None, rows=None,
+         header=False, case_sensitive=False, upper=False, lower=False,
+         vstorage='fixed', verbose=False, trim_strings=False, **keys):
+    """
+    Convenience function to read data from the specified FITS HDU
+
+    By default, all data are read.  For tables, send columns= and rows= to
+    select subsets of the data.  Table data are read into a recarray; use a
+    FITS object and read_column() to get a single column as an ordinary array.
+    For images, create a FITS object and use slice notation to read subsets.
+
+    Under the hood, a FITS object is constructed and data are read using
+    an associated FITSHDU object.
+
+    parameters
+    ----------
+    filename: string
+        A filename.
+    ext: number or string, optional
+        The extension.  Either the numerical extension from zero
+        or a string extension name. If not sent, data is read from
+        the first HDU that has data.
+    extver: integer, optional
+        FITS allows multiple extensions to have the same name (extname).  These
+        extensions can optionally specify an EXTVER version number in the
+        header.  Send extver= to select a particular version.  If extver is not
+        sent, the first one will be selected.  If ext is an integer, the extver
+        is ignored.
+    columns: list or array, optional
+        An optional set of columns to read from table HDUs.  Default is to
+        read all.  Can be string or number.
+    rows: optional
+        An optional list of rows to read from table HDUS.  Default is to
+        read all.
+    header: bool, optional
+        If True, read the FITS header and return a tuple (data,header)
+        Default is False.
+    case_sensitive: bool, optional
+        Match column names and extension names with case-sensitivity.  Default
+        is False.
+    lower: bool, optional
+        If True, force all column names to lower case in output. Default is
+        False.
+    upper: bool, optional
+        If True, force all column names to upper case in output. Default is
+        False.
+    vstorage: string, optional
+        Set the default method to store variable length columns.  Can be
+        'fixed' or 'object'.  See docs on fitsio.FITS for details. Default is
+        'fixed'.
+    trim_strings: bool, optional
+        If True, trim trailing spaces from strings. Will override the
+        trim_strings= keyword from the constructor.
+    verbose: bool, optional
+        If True, print more info when doing various FITS operations.
+    """
+
+    if keys:
+        import warnings
+        warnings.warn(
+            "The keyword arguments '%s' are being ignored! This warning "
+            "will be an error in a future version of `fitsio`!" % keys,
+            DeprecationWarning, stacklevel=2)
+
+    kwargs = {
+        'lower': lower,
+        'upper': upper,
+        'vstorage': vstorage,
+        'case_sensitive': case_sensitive,
+        'verbose': verbose,
+        'trim_strings': trim_strings
+    }
+
+    read_kwargs = {}
+    if columns is not None:
+        read_kwargs['columns'] = columns
+    if rows is not None:
+        read_kwargs['rows'] = rows
+
+    with FITS(filename, **kwargs) as fits:
+
+        if ext is None:
+            for i in xrange(len(fits)):
+                if fits[i].has_data():
+                    ext = i
+                    break
+            if ext is None:
+                raise IOError("No extensions have data")
+
+        item = _make_item(ext, extver=extver)
+
+        data = fits[item].read(**read_kwargs)
+        if header:
+            h = fits[item].read_header()
+            return data, h
+        else:
+            return data
+
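+# Usage sketch for read() (illustrative only; the file and column names
+# are hypothetical):
+#
+#     data, hdr = read('example.fits', ext=1, columns=['x', 'y'],
+#                      rows=[0, 5], header=True)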
+
+def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys):
+    """
+    Convenience function to read the header from the specified FITS HDU
+
+    The FITSHDR allows access to the values and comments by name and
+    number.
+
+    parameters
+    ----------
+    filename: string
+        A filename.
+    ext: number or string, optional
+        The extension.  Either the numerical extension from zero
+        or a string extension name. Default is to read the primary header.
+    extver: integer, optional
+        FITS allows multiple extensions to have the same name (extname).  These
+        extensions can optionally specify an EXTVER version number in the
+        header.  Send extver= to select a particular version.  If extver is not
+        sent, the first one will be selected.  If ext is an integer, the extver
+        is ignored.
+    case_sensitive: bool, optional
+        Match extension names with case-sensitivity.  Default is False.
+    """
+
+    if keys:
+        import warnings
+        warnings.warn(
+            "The keyword arguments '%s' are being ignored! This warning "
+            "will be an error in a future version of `fitsio`!" % keys,
+            DeprecationWarning, stacklevel=2)
+
+    filename = extract_filename(filename)
+
+    dont_create = 0
+    try:
+        hdunum = ext+1
+    except TypeError:
+        hdunum = None
+
+    _fits = _fitsio_wrap.FITS(filename, READONLY, dont_create)
+
+    if hdunum is None:
+        extname = mks(ext)
+        if extver is None:
+            extver_num = 0
+        else:
+            extver_num = extver
+
+        if not case_sensitive:
+            # the builtin movnam_hdu is not case sensitive
+            hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num)
+        else:
+            # for case sensitivity we'll need to run through
+            # all the hdus
+            found = False
+            current_ext = 0
+            while True:
+                hdunum = current_ext+1
+                try:
+                    hdu_type = _fits.movabs_hdu(hdunum)  # noqa - not used
+                    name, vers = _fits.get_hdu_name_version(hdunum)
+                    if name == extname:
+                        if extver is None:
+                            # take the first match
+                            found = True
+                            break
+                        else:
+                            if extver_num == vers:
+                                found = True
+                                break
+                except OSError:
+                    break
+
+                current_ext += 1
+
+            if not found:
+                raise IOError(
+                    'hdu not found: %s (extver %s)' % (extname, extver))
+
+    return FITSHDR(_fits.read_header(hdunum))
+
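+# Usage sketch for read_header() (illustrative only; the file and
+# extension names are hypothetical):
+#
+#     hdr = read_header('example.fits', ext='SCI', extver=2)
+#     exptime = hdr['EXPTIME']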
+
+def read_scamp_head(fname, header=None):
+    """
+    read a SCAMP .head file as a fits header FITSHDR object
+
+    parameters
+    ----------
+    fname: string
+        The path to the SCAMP .head file
+
+    header: FITSHDR, optional
+        Optionally combine the header with the input one. The input can
+        be any object convertible to a FITSHDR object.
+
+    returns
+    -------
+    header: FITSHDR
+        A fits header object of type FITSHDR
+    """
+
+    with open(fname) as fobj:
+        lines = fobj.readlines()
+
+    lines = [l.strip() for l in lines if l[0:3] != 'END']
+
+    # if header is None an empty FITSHDR is created
+    hdr = FITSHDR(header)
+
+    for l in lines:
+        hdr.add_record(l)
+
+    return hdr
+
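+# Usage sketch for read_scamp_head() (illustrative only; the path is
+# hypothetical):
+#
+#     hdr = read_scamp_head('coadd.head')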
+
+def _make_item(ext, extver=None):
+    if extver is not None:
+        item = (ext, extver)
+    else:
+        item = ext
+
+    return item
+
+
+def write(filename, data, extname=None, extver=None, header=None,
+          clobber=False, ignore_empty=False, units=None, table_type='binary',
+          names=None, write_bitcols=False, compress=None, tile_dims=None,
+          qlevel=DEFAULT_QLEVEL,
+          qmethod=DEFAULT_QMETHOD,
+          hcomp_scale=DEFAULT_HCOMP_SCALE,
+          hcomp_smooth=False,
+          **keys):
+    """
+    Convenience function to create a new HDU and write the data.
+
+    Under the hood, a FITS object is constructed.  If you want to append rows
+    to an existing HDU, or modify data in an HDU, please construct a FITS
+    object.
+
+    parameters
+    ----------
+    filename: string
+        A filename.
+    data: numpy.ndarray or recarray
+        Either a normal n-dimensional array or a recarray.  Images are written
+        to a new IMAGE_HDU and recarrays are written to BINARY_TBl or
+        ASCII_TBL hdus.
+    extname: string, optional
+        An optional name for the new header unit.
+    extver: integer, optional
+        FITS allows multiple extensions to have the same name (extname).
+        These extensions can optionally specify an EXTVER version number in
+        the header.  Send extver= to set a particular version, which will
+        be represented in the header with keyname EXTVER.  The extver must
+        be an integer > 0.  If extver is not sent, the first one will be
+        selected.  If ext is an integer, the extver is ignored.
+    header: FITSHDR, list, dict, optional
+        A set of header keys to write. The keys are written before the data
+        is written to the table, preventing a resizing of the table area.
+
+        Can be one of these:
+            - FITSHDR object
+            - list of dictionaries containing 'name','value' and optionally
+              a 'comment' field; the order is preserved.
+            - a dictionary of keyword-value pairs; no comments are written
+              in this case, and the order is arbitrary.
+        Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
+    clobber: bool, optional
+        If True, overwrite any existing file. Default is to append
+        a new extension on existing files.
+    ignore_empty: bool, optional
+        Default False.  Unless set to True, only allow
+        empty HDUs in the zero extension.
+
+    table-only keywords
+    -------------------
+    units: list
+        A list of strings representing units for each column.
+    table_type: string, optional
+        Either 'binary' or 'ascii', default 'binary'
+        Matching is case-insensitive
+    write_bitcols: bool, optional
+        Write boolean arrays in the FITS bitcols format, default False
+    names: list, optional
+        If data is a list of arrays, you must send `names` as a list
+        of names or column numbers.
+
+    image-only keywords
+    -------------------
+    compress: string, optional
+        A string representing the compression algorithm for images,
+        default None.
+        Can be one of
+           'RICE'
+           'GZIP'
+           'GZIP_2'
+           'PLIO' (no unsigned or negative integers)
+           'HCOMPRESS'
+        (case-insensitive) See the cfitsio manual for details.
+    tile_dims: tuple of ints, optional
+        The size of the tiles used to compress images.
+    qlevel: float, optional
+        Quantization level for floating point data.  Lower values generally
+        result in more compression; we recommend reading the FITS standard or
+        the cfitsio manual to fully understand the effects of quantization.
+        None or 0 means no quantization, and for gzip also implies lossless
+        compression.  Default is 4.0, which follows the fpack defaults.
+    qmethod: string or int
+        The quantization method as string or integer.
+            'NO_DITHER' or fitsio.NO_DITHER (-1)
+               No dithering is performed
+            'SUBTRACTIVE_DITHER_1' or fitsio.SUBTRACTIVE_DITHER_1 (1)
+                Standard dithering
+            'SUBTRACTIVE_DITHER_2' or fitsio.SUBTRACTIVE_DITHER_2 (2)
+                Preserves zeros
+
+        Defaults to 'SUBTRACTIVE_DITHER_1', which follows the fpack defaults.
+
+    hcomp_scale: float
+        Scale value for HCOMPRESS, 0.0 means lossless compression. Default is 0.0
+        following the fpack defaults.
+    hcomp_smooth: bool
+        If True, apply smoothing when decompressing.  Default False
+    """
+    if keys:
+        import warnings
+        warnings.warn(
+            "The keyword arguments '%s' are being ignored! This warning "
+            "will be an error in a future version of `fitsio`!" % keys,
+            DeprecationWarning, stacklevel=2)
+
+    kwargs = {
+        'clobber': clobber,
+        'ignore_empty': ignore_empty
+    }
+    with FITS(filename, 'rw', **kwargs) as fits:
+        fits.write(
+            data,
+            table_type=table_type,
+            units=units,
+            extname=extname,
+            extver=extver,
+            header=header,
+            names=names,
+            write_bitcols=write_bitcols,
+
+            compress=compress,
+            tile_dims=tile_dims,
+            qlevel=qlevel,
+            qmethod=qmethod,
+            hcomp_scale=hcomp_scale,
+            hcomp_smooth=hcomp_smooth,
+        )
+
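+# Usage sketch for write() (illustrative only; the file name is
+# hypothetical).  Writing a float image with RICE compression and the
+# default quantization settings:
+#
+#     import numpy
+#     img = numpy.random.normal(size=(100, 100)).astype('f4')
+#     write('example.fits', img, compress='RICE', clobber=True)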
+
+class FITS(object):
+    """
+    A class to read and write FITS images and tables.
+
+    This class uses the cfitsio library for almost all relevant work.
+
+    parameters
+    ----------
+    filename: string
+        The filename to open.
+    mode: int/string, optional
+        The mode, either a string or integer.
+        For reading only
+            'r' or 0
+        For reading and writing
+            'rw' or 1
+        You can also use fitsio.READONLY and fitsio.READWRITE.
+
+        Default is 'r'
+    clobber: bool, optional
+        If the mode is READWRITE, and clobber=True, then remove any existing
+        file before opening.
+    case_sensitive: bool, optional
+        Match column names and extension names with case-sensitivity.  Default
+        is False.
+    lower: bool, optional
+        If True, force all column names to lower case in output
+    upper: bool, optional
+        If True, force all column names to upper case in output
+    vstorage: string, optional
+        A string describing how, by default, to store variable length columns
+        in the output array.  This can be over-ridden when reading by using the
+        using vstorage keyword to the individual read methods.  The options are
+
+            'fixed': Use a fixed length field in the array, with
+                dimensions equal to the max possible size for column.
+                Arrays are padded with zeros.
+            'object': Use an object for the field in the array.
+                Each element will then be an array of the right type,
+                but only using the memory needed to hold that element.
+
+        Default is 'fixed'.  The rationale is that this is the option of
+        'least surprise'.
+    iter_row_buffer: integer
+        Number of rows to buffer when iterating over table HDUs.
+        Default is 1.
+    ignore_empty: bool, optional
+        Default False.  Unless set to True, only allow
+        empty HDUs in the zero extension.
+    verbose: bool, optional
+        If True, print more info when doing various FITS operations.
+
+    See the docs at https://github.com/esheldon/fitsio
+    """
+    def __init__(self, filename, mode='r', lower=False, upper=False,
+                 trim_strings=False, vstorage='fixed', case_sensitive=False,
+                 iter_row_buffer=1, write_bitcols=False, ignore_empty=False,
+                 verbose=False, clobber=False, **keys):
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        self.lower = lower
+        self.upper = upper
+        self.trim_strings = trim_strings
+        self.vstorage = vstorage
+        self.case_sensitive = case_sensitive
+        self.iter_row_buffer = iter_row_buffer
+        self.write_bitcols = write_bitcols
+        filename = extract_filename(filename)
+        self._filename = filename
+
+        self.mode = mode
+        self.ignore_empty = ignore_empty
+
+        self.verbose = verbose
+
+        if self.mode not in _int_modemap:
+            raise IOError("mode should be one of 'r', 'rw', "
+                          "READONLY,READWRITE")
+
+        self.charmode = _char_modemap[self.mode]
+        self.intmode = _int_modemap[self.mode]
+
+        # Will not test existence when reading, let cfitsio
+        # do the test and report an error.  This allows opening
+        # urls etc.
+        create = 0
+        if self.mode in [READWRITE, 'rw']:
+            if clobber:
+                create = 1
+                if filename[0] != '!':
+                    filename = '!' + filename
+            else:
+                if os.path.exists(filename):
+                    create = 0
+                else:
+                    create = 1
+
+        self._did_create = (create == 1)
+        self._FITS = _fitsio_wrap.FITS(filename, self.intmode, create)
+
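+    # Usage sketch (illustrative only; the file name is hypothetical).
+    # The class is used as a context manager, which closes the file on
+    # exit:
+    #
+    #     with FITS('example.fits', 'rw', clobber=True) as fits:
+    #         fits.write_image(img)
+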
+    def close(self):
+        """
+        Close the fits file and set relevant metadata to None
+        """
+        if hasattr(self, '_FITS'):
+            if self._FITS is not None:
+                self._FITS.close()
+                self._FITS = None
+        self._filename = None
+        self.mode = None
+        self.charmode = None
+        self.intmode = None
+        self.hdu_list = None
+        self.hdu_map = None
+
+    def movabs_ext(self, ext):
+        """
+        Move to the indicated zero-offset extension.
+
+        In general, it is not necessary to use this method explicitly.
+        """
+        return self._FITS.movabs_hdu(ext+1)
+
+    def movabs_hdu(self, hdunum):
+        """
+        Move to the indicated one-offset hdu number.
+
+        In general, it is not necessary to use this method explicitly.
+        """
+        return self._FITS.movabs_hdu(hdunum)
+
+    def movnam_ext(self, extname, hdutype=ANY_HDU, extver=0):
+        """
+        Move to the indicated extension by name
+
+        In general, it is not necessary to use this method explicitly.
+
+        returns the zero-offset extension number
+        """
+        extname = mks(extname)
+        hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
+        return hdu-1
+
+    def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0):
+        """
+        Move to the indicated HDU by name
+
+        In general, it is not necessary to use this method explicitly.
+
+        returns the one-offset extension number
+        """
+        extname = mks(extname)
+        hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
+        return hdu
+
+    def reopen(self):
+        """
+        close and reopen the fits file with the same mode
+        """
+        self._FITS.close()
+        del self._FITS
+        self._FITS = _fitsio_wrap.FITS(self._filename, self.intmode, 0)
+        self.update_hdu_list()
+
+    def write(self, data, units=None, extname=None, extver=None,
+              compress=None,
+              tile_dims=None,
+              qlevel=DEFAULT_QLEVEL,
+              qmethod=DEFAULT_QMETHOD,
+              hcomp_scale=DEFAULT_HCOMP_SCALE,
+              hcomp_smooth=False,
+              header=None, names=None,
+              table_type='binary', write_bitcols=False, **keys):
+        """
+        Write the data to a new HDU.
+
+        This method is a wrapper.  If this is an IMAGE_HDU, write_image is
+        called, otherwise write_table is called.
+
+        parameters
+        ----------
+        data: ndarray
+            An n-dimensional image or an array with fields.
+        extname: string, optional
+            An optional extension name.
+        extver: integer, optional
+            FITS allows multiple extensions to have the same name (extname).
+            These extensions can optionally specify an EXTVER version number in
+            the header.  Send extver= to set a particular version, which will
+            be represented in the header with keyname EXTVER.  The extver must
+            be an integer > 0.  If extver is not sent, the first one will be
+            selected.  If ext is an integer, the extver is ignored.
+        header: FITSHDR, list, dict, optional
+            A set of header keys to write. Can be one of these:
+                - FITSHDR object
+                - list of dictionaries containing 'name','value' and optionally
+                  a 'comment' field; the order is preserved.
+                - a dictionary of keyword-value pairs; no comments are written
+                  in this case, and the order is arbitrary.
+            Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
+
+        image-only keywords
+        -------------------
+        compress: string, optional
+            A string representing the compression algorithm for images,
+            default None.
+            Can be one of
+                'RICE'
+                'GZIP'
+                'GZIP_2'
+                'PLIO' (no unsigned or negative integers)
+                'HCOMPRESS'
+            (case-insensitive) See the cfitsio manual for details.
+        tile_dims: tuple of ints, optional
+            The size of the tiles used to compress images.
+        qlevel: float, optional
+            Quantization level for floating point data.  Lower values
+            generally result in more compression; we recommend reading the
+            FITS standard or the cfitsio manual to fully understand the
+            effects of quantization.  None or 0 means no quantization, and
+            for gzip also implies lossless compression.  Default is 4.0,
+            which follows the fpack defaults.
+        qmethod: string or int
+            The quantization method as string or integer.
+                'NO_DITHER' or fitsio.NO_DITHER (-1)
+                   No dithering is performed
+                'SUBTRACTIVE_DITHER_1' or fitsio.SUBTRACTIVE_DITHER_1 (1)
+                    Standard dithering
+                'SUBTRACTIVE_DITHER_2' or fitsio.SUBTRACTIVE_DITHER_2 (2)
+                    Preserves zeros
+
+            Defaults to 'SUBTRACTIVE_DITHER_1', which follows the fpack defaults.
+
+        hcomp_scale: float
+            Scale value for HCOMPRESS, 0.0 means lossless compression. Default is 0.0
+            following the fpack defaults.
+        hcomp_smooth: bool
+            If True, apply smoothing when decompressing.  Default False
+
+        table-only keywords
+        -------------------
+        units: list, optional
+            A list of strings with units for each column.
+        table_type: string, optional
+            Either 'binary' or 'ascii', default 'binary'
+            Matching is case-insensitive
+        write_bitcols: bool, optional
+            Write boolean arrays in the FITS bitcols format, default False
+        names: list, optional
+            If data is a list of arrays, you must send `names` as a list
+            of names or column numbers.
+
+        restrictions
+        ------------
+        The File must be opened READWRITE
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        isimage = False
+        if data is None:
+            isimage = True
+        elif isinstance(data, numpy.ndarray):
+            if data.dtype.fields is None:
+                isimage = True
+
+        if isimage:
+            self.write_image(data, extname=extname, extver=extver,
+                             compress=compress,
+                             tile_dims=tile_dims,
+                             qlevel=qlevel,
+                             qmethod=qmethod,
+                             hcomp_scale=hcomp_scale,
+                             hcomp_smooth=hcomp_smooth,
+                             header=header)
+        else:
+            self.write_table(data, units=units,
+                             extname=extname, extver=extver, header=header,
+                             names=names,
+                             table_type=table_type,
+                             write_bitcols=write_bitcols)
+
+    def write_image(self, img, extname=None, extver=None,
+                    compress=None, tile_dims=None,
+                    qlevel=DEFAULT_QLEVEL,
+                    qmethod=DEFAULT_QMETHOD,
+                    hcomp_scale=DEFAULT_HCOMP_SCALE,
+                    hcomp_smooth=False,
+                    header=None):
+        """
+        Create a new image extension and write the data.
+
+        parameters
+        ----------
+        img: ndarray
+            An n-dimensional image.
+        extname: string, optional
+            An optional extension name.
+        extver: integer, optional
+            FITS allows multiple extensions to have the same name (extname).
+            These extensions can optionally specify an EXTVER version number in
+            the header.  Send extver= to set a particular version, which will
+            be represented in the header with keyname EXTVER.  The extver must
+            be an integer > 0.  If extver is not sent, the first one will be
+            selected.  If ext is an integer, the extver is ignored.
+        compress: string, optional
+            A string representing the compression algorithm for images,
+            default None.
+            Can be one of
+                'RICE'
+                'GZIP'
+                'GZIP_2'
+                'PLIO' (no unsigned or negative integers)
+                'HCOMPRESS'
+            (case-insensitive) See the cfitsio manual for details.
+        tile_dims: tuple of ints, optional
+            The size of the tiles used to compress images.
+        qlevel: float, optional
+            Quantization level for floating point data.  Lower values
+            generally result in more compression; we recommend reading the
+            FITS standard or the cfitsio manual to fully understand the
+            effects of quantization.  None or 0 means no quantization, and
+            for gzip also implies lossless compression.  Default is 4.0,
+            which follows the fpack defaults.
+        qmethod: string or int
+            The quantization method as string or integer.
+                'NO_DITHER' or fitsio.NO_DITHER (-1)
+                    No dithering is performed
+                'SUBTRACTIVE_DITHER_1' or fitsio.SUBTRACTIVE_DITHER_1 (1)
+                    Standard dithering
+                'SUBTRACTIVE_DITHER_2' or fitsio.SUBTRACTIVE_DITHER_2 (2)
+                    Preserves zeros
+
+            Defaults to 'SUBTRACTIVE_DITHER_1' which follows the fpack defaults
+
+        hcomp_scale: float
+            Scale value for HCOMPRESS, 0.0 means lossless compression. Default is 0.0
+            following the fpack defaults.
+        hcomp_smooth: bool
+            If True, apply smoothing when decompressing.  Default False
+
+        header: FITSHDR, list, dict, optional
+            A set of header keys to write. Can be one of these:
+                - FITSHDR object
+                - list of dictionaries containing 'name','value' and optionally
+                  a 'comment' field; the order is preserved.
+                - a dictionary of keyword-value pairs; no comments are written
+                  in this case, and the order is arbitrary.
+            Note that required keywords such as NAXIS, XTENSION, etc. are
+            cleaned out.
+
+
+        restrictions
+        ------------
+        The File must be opened READWRITE
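+
+        examples
+        --------
+        A minimal sketch; the file name and image values are illustrative:
+
+            import numpy
+            import fitsio
+
+            img = numpy.arange(12, dtype='f4').reshape(3, 4)
+            with fitsio.FITS('img.fits', 'rw', clobber=True) as fits:
+                # RICE tile compression with the fpack default quantization
+                fits.write_image(img, compress='RICE', qlevel=4.0)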
+        """
+
+        self.create_image_hdu(
+            img,
+            header=header,
+            extname=extname,
+            extver=extver,
+            compress=compress,
+            tile_dims=tile_dims,
+            qlevel=qlevel,
+            qmethod=qmethod,
+            hcomp_scale=hcomp_scale,
+            hcomp_smooth=hcomp_smooth,
+        )
+
+        if header is not None:
+            self[-1].write_keys(header)
+            self[-1]._update_info()
+
+        # if img is not None:
+        #    self[-1].write(img)
+
+    def create_image_hdu(self,
+                         img=None,
+                         dims=None,
+                         dtype=None,
+                         extname=None,
+                         extver=None,
+                         compress=None,
+                         tile_dims=None,
+                         qlevel=DEFAULT_QLEVEL,
+                         qmethod=DEFAULT_QMETHOD,
+                         hcomp_scale=DEFAULT_HCOMP_SCALE,
+                         hcomp_smooth=False,
+                         header=None):
+        """
+        Create a new, empty image HDU and reload the hdu list.  Either
+        create from an input image or from input dims and dtype
+
+            fits.create_image_hdu(image, ...)
+            fits.create_image_hdu(dims=dims, dtype=dtype)
+
+        If an image is sent, the data are also written.
+
+        You can write data into the new extension using
+            fits[extension].write(image)
+
+        Alternatively you can skip calling this function and instead just use
+
+            fits.write(image)
+            or
+            fits.write_image(image)
+
+        which will create the new image extension for you with the appropriate
+        structure, and write the data.
+
+        parameters
+        ----------
+        img: ndarray, optional
+            An image with which to determine the properties of the HDU. The
+            data will be written.
+        dims: sequence, optional
+            A sequence describing the dimensions of the image to be created
+            on disk.  You must also send a dtype=
+        dtype: numpy data type
+            When sending dims= also send the data type.  Can be of the
+            various numpy data type declaration styles, e.g. 'f8',
+            numpy.float64.
+        extname: string, optional
+            An optional extension name.
+        extver: integer, optional
+            FITS allows multiple extensions to have the same name (extname).
+            These extensions can optionally specify an EXTVER version number in
+            the header.  Send extver= to set a particular version, which will
+            be represented in the header with keyname EXTVER.  The extver must
+            be an integer > 0.  If extver is not sent, the first one will be
+            selected.  If ext is an integer, the extver is ignored.
+        compress: string, optional
+            A string representing the compression algorithm for images,
+            default None.
+            Can be one of
+                'RICE'
+                'GZIP'
+                'GZIP_2'
+                'PLIO' (no unsigned or negative integers)
+                'HCOMPRESS'
+            (case-insensitive) See the cfitsio manual for details.
+        tile_dims: tuple of ints, optional
+            The size of the tiles used to compress images.
+        qlevel: float, optional
+            Quantization level for floating point data.  Lower values
+            generally result in more compression; we recommend reading the
+            FITS standard or the cfitsio manual to fully understand the
+            effects of quantization.  None or 0 means no quantization, and
+            for gzip also implies lossless.  Default is 4.0, which follows
+            the fpack defaults.
+        qmethod: string or int
+            The quantization method as string or integer.
+                'NO_DITHER' or fitsio.NO_DITHER (-1)
+                    No dithering is performed
+                'SUBTRACTIVE_DITHER_1' or fitsio.SUBTRACTIVE_DITHER_1 (1)
+                    Standard dithering
+                'SUBTRACTIVE_DITHER_2' or fitsio.SUBTRACTIVE_DITHER_2 (2)
+                    Preserves zeros
+
+            Defaults to 'SUBTRACTIVE_DITHER_1' which follows the fpack defaults
+
+        hcomp_scale: float
+            Scale value for HCOMPRESS, 0.0 means lossless compression. Default is 0.0
+            following the fpack defaults.
+        hcomp_smooth: bool
+            If True, apply smoothing when decompressing.  Default False
+
+        header: FITSHDR, list, dict, optional
+            This is only used to determine how many slots to reserve for
+            header keywords
+
+        restrictions
+        ------------
+        The File must be opened READWRITE
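+
+        examples
+        --------
+        A minimal sketch creating an empty HDU from dims and dtype, then
+        filling it; the file name is illustrative:
+
+            import numpy
+            import fitsio
+
+            with fitsio.FITS('img.fits', 'rw', clobber=True) as fits:
+                fits.create_image_hdu(dims=[100, 200], dtype='f8')
+                fits[-1].write(numpy.ones((100, 200)))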
+        """
+
+        if (img is not None) or (img is None and dims is None):
+            from_image = True
+        elif dims is not None:
+            from_image = False
+
+        if from_image:
+            img2send = img
+            if img is not None:
+                dims = img.shape
+                dtstr = img.dtype.descr[0][1][1:]
+                if img.size == 0:
+                    raise ValueError("data must have at least 1 row")
+
+                # data must be c-contiguous and native byte order
+                if not img.flags['C_CONTIGUOUS']:
+                    # this always makes a copy
+                    img2send = numpy.ascontiguousarray(img)
+                    array_to_native(img2send, inplace=True)
+                else:
+                    img2send = array_to_native(img, inplace=False)
+
+                if IS_PY3 and img2send.dtype.char == 'U':
+                    # for python3, we convert unicode to ascii
+                    # this will error if the character is not in ascii
+                    img2send = img2send.astype('S', copy=False)
+
+            else:
+                self._ensure_empty_image_ok()
+                compress = None
+                tile_dims = None
+
+            # we get dims from the input image
+            dims2send = None
+        else:
+            # img was None and dims was sent
+            if dtype is None:
+                raise ValueError("send dtype= with dims=")
+
+            # this must work!
+            dtype = numpy.dtype(dtype)
+            dtstr = dtype.descr[0][1][1:]
+            # use the example image to build the type in C
+            img2send = numpy.zeros(1, dtype=dtype)
+
+            # sending an array simplifies access
+            dims2send = numpy.array(dims, dtype='i8', ndmin=1)
+
+        if img2send is not None:
+            if img2send.dtype.fields is not None:
+                raise ValueError(
+                    "got record data type, expected regular ndarray")
+
+        if extname is None:
+            # will be ignored
+            extname = ""
+        else:
+            if not isstring(extname):
+                raise ValueError("extension name must be a string")
+            extname = mks(extname)
+
+        if extname is not None and extver is not None:
+            extver = check_extver(extver)
+
+        if extver is None:
+            # will be ignored
+            extver = 0
+
+        comptype = get_compress_type(compress)
+        qmethod = get_qmethod(qmethod)
+
+        tile_dims = get_tile_dims(tile_dims, dims)
+        if qlevel is None:
+            # 0.0 is the sentinel value for "no quantization" in cfitsio
+            qlevel = 0.0
+        else:
+            qlevel = float(qlevel)
+
+        if img2send is not None:
+            check_comptype_img(comptype, dtstr)
+
+        if header is not None:
+            nkeys = len(header)
+        else:
+            nkeys = 0
+
+        if hcomp_smooth:
+            hcomp_smooth = 1
+        else:
+            hcomp_smooth = 0
+
+        self._FITS.create_image_hdu(
+            img2send,
+            nkeys,
+            dims=dims2send,
+            comptype=comptype,
+            tile_dims=tile_dims,
+
+            qlevel=qlevel,
+            qmethod=qmethod,
+
+            hcomp_scale=hcomp_scale,
+            hcomp_smooth=hcomp_smooth,
+
+            extname=extname,
+            extver=extver,
+        )
+
+        # don't rebuild the whole list unless this is the first hdu
+        # to be created
+        self.update_hdu_list(rebuild=False)
+
+    def _ensure_empty_image_ok(self):
+        """
+        If ignore_empty was not set to True, we only allow empty HDU for first
+        HDU and if there is no data there already
+        """
+        if self.ignore_empty:
+            return
+
+        if len(self) > 1:
+            raise RuntimeError(
+                "Cannot write None image at extension %d" % len(self))
+        if 'ndims' in self[0]._info:
+            raise RuntimeError("Can only write None images to extension zero, "
+                               "which already exists")
+
+    def write_table(self, data, table_type='binary',
+                    names=None, formats=None, units=None,
+                    extname=None, extver=None, header=None,
+                    write_bitcols=False):
+        """
+        Create a new table extension and write the data.
+
+        The table definition is taken from the fields in the input array.  If
+        you want to append new rows to the table, access the HDU directly and
+        use the write() function, e.g.
+
+            fits[extension].append(data)
+
+        parameters
+        ----------
+        data: recarray
+            A numpy array with fields.  The table definition will be
+            determined from this array.
+        table_type: string, optional
+            Either 'binary' or 'ascii', default 'binary'
+            Matching is case-insensitive
+        extname: string, optional
+            An optional string for the extension name.
+        extver: integer, optional
+            FITS allows multiple extensions to have the same name (extname).
+            These extensions can optionally specify an EXTVER version number in
+            the header.  Send extver= to set a particular version, which will
+            be represented in the header with keyname EXTVER.  The extver must
+            be an integer > 0.  If extver is not sent, the first one will be
+            selected.  If ext is an integer, the extver is ignored.
+        units: list, optional
+            A list of strings with units for each column.
+        header: FITSHDR, list, dict, optional
+            A set of header keys to write. The keys are written before the data
+            is written to the table, preventing a resizing of the table area.
+
+            Can be one of these:
+                - FITSHDR object
+                - list of dictionaries containing 'name','value' and optionally
+                  a 'comment' field; the order is preserved.
+                - a dictionary of keyword-value pairs; no comments are written
+                  in this case, and the order is arbitrary.
+            Note that required keywords such as NAXIS, XTENSION, etc. are
+            cleaned out.
+        write_bitcols: boolean, optional
+            Write boolean arrays in the FITS bitcols format, default False
+        names: list, optional
+            If data is a list of arrays, you must send `names` as a list
+            of column names.
+
+        restrictions
+        ------------
+        The File must be opened READWRITE
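+
+        examples
+        --------
+        A minimal sketch; the file, column names and units are illustrative:
+
+            import numpy
+            import fitsio
+
+            rec = numpy.zeros(4, dtype=[('id', 'i8'), ('flux', 'f4')])
+            with fitsio.FITS('cat.fits', 'rw', clobber=True) as fits:
+                fits.write_table(rec, extname='CAT', units=['', 'counts'])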
+        """
+
+        """
+        if data.dtype.fields == None:
+            raise ValueError("data must have fields")
+        if data.size == 0:
+            raise ValueError("data must have at least 1 row")
+        """
+
+        self.create_table_hdu(data=data,
+                              header=header,
+                              names=names,
+                              units=units,
+                              extname=extname,
+                              extver=extver,
+                              table_type=table_type,
+                              write_bitcols=write_bitcols)
+
+        if header is not None:
+            self[-1].write_keys(header)
+            self[-1]._update_info()
+
+        self[-1].write(data, names=names)
+
+    def read_raw(self):
+        """
+        Read the raw FITS file contents, returning a Python string
+        (bytes on Python 3).
+        """
+        return self._FITS.read_raw()
+
+    def create_table_hdu(self, data=None, dtype=None,
+                         header=None,
+                         names=None, formats=None,
+                         units=None, dims=None, extname=None, extver=None,
+                         table_type='binary', write_bitcols=False):
+        """
+        Create a new, empty table extension and reload the hdu list.
+
+        There are three ways to do it:
+            1) send a numpy dtype, from which the formats in the fits file
+               will be determined.
+            2) send an array in the data= keyword.  This is required if you
+               have object fields to be written to variable length columns.
+            3) send names, formats and dims yourself.
+
+        You can then write data into the new extension using
+            fits[extension].write(array)
+        If you want to write to a single column
+            fits[extension].write_column(array)
+        But be careful as the other columns will be left zeroed.
+
+        Often you will instead just use write_table to do this all
+        atomically.
+
+            fits.write_table(recarray)
+
+        write_table will create the new table extension for you with the
+        appropriate fields.
+
+        parameters
+        ----------
+        dtype: numpy dtype or descriptor, optional
+            If you have an array with fields, you can just send arr.dtype.  You
+            can also use a list of tuples, e.g. [('x','f8'),('index','i4')] or
+            a dictionary representation.
+        data: numpy array with fields or dict, optional
+            An array or dict from which to determine the table definition.
+            You must use this instead of sending a descriptor if you have
+            object array fields, as this is the only way to determine the
+            type and max size.
+
+        names: list of strings, optional
+            The list of field names
+        formats: list of strings, optional
+            The TFORM format strings for each field.
+        dims: list of strings, optional
+            An optional list of dimension strings for each field.  Should
+            match the repeat count for the formats fields. Be careful of
+            the order since FITS is more like fortran. See the descr2tabledef
+            function.
+
+        table_type: string, optional
+            Either 'binary' or 'ascii', default 'binary'
+            Matching is case-insensitive
+        units: list of strings, optional
+            An optional list of unit strings for each field.
+        extname: string, optional
+            An optional extension name.
+        extver: integer, optional
+            FITS allows multiple extensions to have the same name (extname).
+            These extensions can optionally specify an EXTVER version number in
+            the header.  Send extver= to set a particular version, which will
+            be represented in the header with keyname EXTVER.  The extver must
+            be an integer > 0.  If extver is not sent, the first one will be
+            selected.  If ext is an integer, the extver is ignored.
+        write_bitcols: bool, optional
+            Write boolean arrays in the FITS bitcols format, default False
+
+        header: FITSHDR, list, dict, optional
+            This is only used to determine how many slots to reserve for
+            header keywords
+
+
+        restrictions
+        ------------
+        The File must be opened READWRITE
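+
+        examples
+        --------
+        A minimal sketch using a dtype descriptor; the file and column
+        names are illustrative:
+
+            import fitsio
+
+            with fitsio.FITS('cat.fits', 'rw', clobber=True) as fits:
+                fits.create_table_hdu(dtype=[('x', 'f8'), ('s', 'S10')],
+                                      extname='CAT')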
+        """
+
+        # record this for the TableHDU object
+        write_bitcols = self.write_bitcols or write_bitcols
+
+        # convert the table type (string or integer) to the integer code
+        table_type_int = _extract_table_type(table_type)
+
+        if data is not None:
+            if isinstance(data, numpy.ndarray):
+                names, formats, dims = array2tabledef(
+                    data, table_type=table_type, write_bitcols=write_bitcols)
+            elif isinstance(data, (list, dict)):
+                names, formats, dims = collection2tabledef(
+                    data, names=names, table_type=table_type,
+                    write_bitcols=write_bitcols)
+            else:
+                raise ValueError(
+                    "data must be an ndarray with fields or a dict")
+        elif dtype is not None:
+            dtype = numpy.dtype(dtype)
+            names, formats, dims = descr2tabledef(
+                dtype.descr,
+                write_bitcols=write_bitcols,
+                table_type=table_type,
+            )
+        else:
+            if names is None or formats is None:
+                raise ValueError(
+                    "send either dtype=, data=, or names= and formats=")
+
+            if not isinstance(names, list) or not isinstance(formats, list):
+                raise ValueError("names and formats should be lists")
+            if len(names) != len(formats):
+                raise ValueError("names and formats must be same length")
+
+            if dims is not None:
+                if not isinstance(dims, list):
+                    raise ValueError("dims should be a list")
+                if len(dims) != len(names):
+                    raise ValueError("names and dims must be same length")
+
+        if units is not None:
+            if not isinstance(units, list):
+                raise ValueError("units should be a list")
+            if len(units) != len(names):
+                raise ValueError("names and units must be same length")
+
+        if extname is None:
+            # will be ignored
+            extname = ""
+        else:
+            if not isstring(extname):
+                raise ValueError("extension name must be a string")
+            extname = mks(extname)
+
+        if extver is not None:
+            extver = check_extver(extver)
+        if extver is None:
+            # will be ignored
+            extver = 0
+
+        if header is not None:
+            nkeys = len(header)
+        else:
+            nkeys = 0
+
+        # note we can create extname in the c code for tables, but not images
+        self._FITS.create_table_hdu(table_type_int, nkeys,
+                                    names, formats, tunit=units, tdim=dims,
+                                    extname=extname, extver=extver)
+
+        # don't rebuild the whole list unless this is the first hdu
+        # to be created
+        self.update_hdu_list(rebuild=False)
+
+    def update_hdu_list(self, rebuild=True):
+        """
+        Force an update of the entire HDU list
+
+        Normally you don't need to call this method directly
+
+        If rebuild is True or the hdu_list is not yet set, the list is
+        rebuilt from scratch
+        """
+        if not hasattr(self, 'hdu_list'):
+            rebuild = True
+
+        if rebuild:
+            self.hdu_list = []
+            self.hdu_map = {}
+
+            # we don't know how many hdus there are, so iterate
+            # until we can't open any more
+            ext_start = 0
+        else:
+            # start from last
+            ext_start = len(self)
+
+        ext = ext_start
+        while True:
+            try:
+                self._append_hdu_info(ext)
+            except (IOError, RuntimeError):
+                break
+
+            ext = ext + 1
+
+    def _append_hdu_info(self, ext):
+        """
+        internal routine
+
+        append info for the indicated extension
+        """
+
+        # raises IOError if the extension is not found
+        hdu_type = self._FITS.movabs_hdu(ext+1)
+
+        if hdu_type == IMAGE_HDU:
+            hdu = ImageHDU(self._FITS, ext)
+        elif hdu_type == BINARY_TBL:
+            hdu = TableHDU(
+                self._FITS, ext,
+                lower=self.lower, upper=self.upper,
+                trim_strings=self.trim_strings,
+                vstorage=self.vstorage, case_sensitive=self.case_sensitive,
+                iter_row_buffer=self.iter_row_buffer,
+                write_bitcols=self.write_bitcols)
+        elif hdu_type == ASCII_TBL:
+            hdu = AsciiTableHDU(
+                self._FITS, ext,
+                lower=self.lower, upper=self.upper,
+                trim_strings=self.trim_strings,
+                vstorage=self.vstorage, case_sensitive=self.case_sensitive,
+                iter_row_buffer=self.iter_row_buffer,
+                write_bitcols=self.write_bitcols)
+        else:
+            mess = ("extension %s is of unknown type %s "
+                    "this is probably a bug")
+            mess = mess % (ext, hdu_type)
+            raise IOError(mess)
+
+        self.hdu_list.append(hdu)
+        self.hdu_map[ext] = hdu
+
+        extname = hdu.get_extname()
+        if not self.case_sensitive:
+            extname = extname.lower()
+        if extname != '':
+            # this will guarantee we default to *first* version,
+            # if version is not requested, using __getitem__
+            if extname not in self.hdu_map:
+                self.hdu_map[extname] = hdu
+
+            ver = hdu.get_extver()
+            if ver > 0:
+                key = '%s-%s' % (extname, ver)
+                self.hdu_map[key] = hdu
+
+    def __iter__(self):
+        """
+        begin iteration over HDUs
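+
+        A minimal sketch, assuming `fits` is an open FITS object:
+
+            for hdu in fits:
+                print(hdu.get_extname())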
+        """
+        if not hasattr(self, 'hdu_list'):
+            self.update_hdu_list()
+        self._iter_index = 0
+        return self
+
+    def next(self):
+        """
+        Move to the next iteration
+        """
+        if self._iter_index == len(self.hdu_list):
+            raise StopIteration
+        hdu = self.hdu_list[self._iter_index]
+        self._iter_index += 1
+        return hdu
+
+    __next__ = next
+
+    def __len__(self):
+        """
+        get the number of extensions
+        """
+        if not hasattr(self, 'hdu_list'):
+            self.update_hdu_list()
+        return len(self.hdu_list)
+
+    def _extract_item(self, item):
+        """
+        utility function to extract an "item", meaning
+        a extension number,name plus version.
+        """
+        ver = 0
+        if isinstance(item, tuple):
+            ver_sent = True
+            nitem = len(item)
+            if nitem == 1:
+                ext = item[0]
+            elif nitem == 2:
+                ext, ver = item
+        else:
+            ver_sent = False
+            ext = item
+        return ext, ver, ver_sent
+
+    def __getitem__(self, item):
+        """
+        Get an hdu by number, name, and possibly version
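+
+        A minimal sketch, assuming `fits` is an open FITS object with a
+        'CAT' extension:
+
+            hdu = fits[1]            # by extension number
+            hdu = fits['CAT']        # by extname
+            hdu = fits['CAT', 2]     # by extname and extver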
+        """
+        if not hasattr(self, 'hdu_list'):
+            if self._did_create:
+                # we created the file and haven't written anything yet
+                raise ValueError("Requested hdu '%s' not present" % item)
+
+            self.update_hdu_list()
+
+        if len(self) == 0:
+            raise ValueError("Requested hdu '%s' not present" % item)
+
+        ext, ver, ver_sent = self._extract_item(item)
+
+        try:
+            # if it is an int
+            hdu = self.hdu_list[ext]
+        except Exception:
+            # might be a string
+            ext = mks(ext)
+            if not self.case_sensitive:
+                mess = '(case insensitive)'
+                ext = ext.lower()
+            else:
+                mess = '(case sensitive)'
+
+            if ver > 0:
+                key = '%s-%s' % (ext, ver)
+                if key not in self.hdu_map:
+                    raise IOError("extension not found: %s, "
+                                  "version %s %s" % (ext, ver, mess))
+                hdu = self.hdu_map[key]
+            else:
+                if ext not in self.hdu_map:
+                    raise IOError("extension not found: %s %s" % (ext, mess))
+                hdu = self.hdu_map[ext]
+
+        return hdu
+
+    def __contains__(self, item):
+        """
+        tell whether specified extension exists, possibly
+        with version sent as well
+        """
+        try:
+            hdu = self[item]  # noqa
+            return True
+        except Exception:
+            return False
+
+    def __repr__(self):
+        """
+        Text representation of some fits file metadata
+        """
+        spacing = ' '*2
+        rep = ['']
+        rep.append("%sfile: %s" % (spacing, self._filename))
+        rep.append("%smode: %s" % (spacing, _modeprint_map[self.intmode]))
+
+        rep.append('%sextnum %-15s %s' % (spacing, "hdutype", "hduname[v]"))
+
+        if not hasattr(self, 'hdu_list'):
+            if not self._did_create:
+                # we expect some stuff
+                self.update_hdu_list()
+
+        if hasattr(self, 'hdu_list'):
+            for i, hdu in enumerate(self.hdu_list):
+                t = hdu._info['hdutype']
+                name = hdu.get_extname()
+                if name != '':
+                    ver = hdu.get_extver()
+                    if ver != 0:
+                        name = '%s[%s]' % (name, ver)
+
+                rep.append(
+                    "%s%-6d %-15s %s" % (spacing, i, _hdu_type_map[t], name))
+
+        rep = '\n'.join(rep)
+        return rep
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exception_type, exception_value, traceback):
+        self.close()
+
+
+def check_extver(extver):
+    if extver is None:
+        return 0
+    extver = int(extver)
+    if extver <= 0:
+        raise ValueError("extver must be > 0")
+    return extver
+
+
+def extract_filename(filename):
+    filename = mks(filename)
+    filename = filename.strip()
+    if filename[0] == "!":
+        filename = filename[1:]
+    filename = os.path.expandvars(filename)
+    filename = os.path.expanduser(filename)
+    return filename
+
+
+def array2tabledef(data, table_type='binary', write_bitcols=False):
+    """
+    Similar to descr2tabledef but if there are object columns a type
+    and max length will be extracted and used for the tabledef
+    """
+    is_ascii = (table_type == 'ascii')
+
+    if data.dtype.fields is None:
+        raise ValueError("data must have fields")
+    names = []
+    names_nocase = {}
+    formats = []
+    dims = []
+
+    descr = data.dtype.descr
+    for d in descr:
+        # these have the form '<f4' or '|S25', etc.  Extract the pure type
+        npy_dtype = d[1][1:]
+        if is_ascii:
+            if npy_dtype in ['u1', 'i1']:
+                raise ValueError(
+                    "1-byte integers are not supported for "
+                    "ascii tables: '%s'" % npy_dtype)
+            if npy_dtype in ['u2']:
+                raise ValueError(
+                    "unsigned 2-byte integers are not supported for "
+                    "ascii tables: '%s'" % npy_dtype)
+
+        if npy_dtype[0] == 'O':
+            # this will be a variable length column 1Pt(len) where t is the
+            # type and len is max length.  Each element must be convertible to
+            # the same type as the first
+            name = d[0]
+            form, dim = npy_obj2fits(data, name)
+        elif npy_dtype[0] == "V":
+            continue
+        else:
+            name, form, dim = _npy2fits(
+                d, table_type=table_type, write_bitcols=write_bitcols)
+
+        if name == '':
+            raise ValueError("field name is an empty string")
+
+        """
+        if is_ascii:
+            if dim is not None:
+                raise ValueError("array columns are not supported for "
+                                 "ascii tables")
+        """
+        name_nocase = name.upper()
+        if name_nocase in names_nocase:
+            raise ValueError(
+                "duplicate column name found: '%s'.  Note "
+                "FITS column names are not case sensitive" % name_nocase)
+
+        names.append(name)
+        names_nocase[name_nocase] = name_nocase
+
+        formats.append(form)
+        dims.append(dim)
+
+    return names, formats, dims
+
+
+def collection2tabledef(
+        data, names=None, table_type='binary', write_bitcols=False):
+    if isinstance(data, dict):
+        if names is None:
+            names = list(data.keys())
+        isdict = True
+    elif isinstance(data, list):
+        if names is None:
+            raise ValueError("For list of array, send names=")
+        isdict = False
+    else:
+        raise ValueError("expected a dict")
+
+    is_ascii = (table_type == 'ascii')
+    formats = []
+    dims = []
+
+    for i, name in enumerate(names):
+
+        if isdict:
+            this_data = data[name]
+        else:
+            this_data = data[i]
+
+        dt = this_data.dtype.descr[0]
+        dname = dt[1][1:]
+
+        if is_ascii:
+            if dname in ['u1', 'i1']:
+                raise ValueError(
+                    "1-byte integers are not supported for "
+                    "ascii tables: '%s'" % dname)
+            if dname in ['u2']:
+                raise ValueError(
+                    "unsigned 2-byte integers are not supported for "
+                    "ascii tables: '%s'" % dname)
+
+        if dname[0] == 'O':
+            # this will be a variable length column 1Pt(len) where t is the
+            # type and len is max length.  Each element must be convertible to
+            # the same type as the first
+            form, dim = npy_obj2fits(this_data)
+        else:
+            send_dt = dt
+            if len(this_data.shape) > 1:
+                send_dt = list(dt) + [this_data.shape[1:]]
+            _, form, dim = _npy2fits(
+                send_dt, table_type=table_type, write_bitcols=write_bitcols)
+
+        formats.append(form)
+        dims.append(dim)
+
+    return names, formats, dims
+
+
+def descr2tabledef(descr, table_type='binary', write_bitcols=False):
+    """
+    Create a FITS table def from the input numpy descriptor.
+
+    parameters
+    ----------
+    descr: list
+        A numpy recarray type descriptor, e.g. array.dtype.descr
+
+    returns
+    -------
+    names, formats, dims: tuple of lists
+        These are the TTYPE, TFORM and TDIM header entries
+        for each field.  dim entries may be None
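+
+    A minimal sketch:
+
+        names, formats, dims = descr2tabledef([('x', 'f8'), ('v', 'f4', 3)])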
+    """
+    names = []
+    formats = []
+    dims = []
+
+    for d in descr:
+
+        """
+        npy_dtype = d[1][1:]
+        if is_ascii and npy_dtype in ['u1','i1']:
+            raise ValueError("1-byte integers are not supported for "
+                             "ascii tables")
+        """
+
+        if d[1][1] == 'O':
+            raise ValueError(
+                'cannot automatically declare a var column without '
+                'some data to determine max len')
+
+        name, form, dim = _npy2fits(
+            d, table_type=table_type, write_bitcols=write_bitcols)
+
+        if name == '':
+            raise ValueError("field name is an empty string")
+
+        """
+        if is_ascii:
+            if dim is not None:
+                raise ValueError("array columns are not supported "
+                                 "for ascii tables")
+        """
+
+        names.append(name)
+        formats.append(form)
+        dims.append(dim)
+
+    return names, formats, dims
+
+
+def npy_obj2fits(data, name=None):
+    # this will be a variable length column 1Pt(len) where t is the
+    # type and len is max length.  Each element must be convertible to
+    # the same type as the first
+
+    if name is None:
+        d = data.dtype.descr
+        first = data[0]
+    else:
+        d = data[name].dtype.descr  # noqa - not used
+        first = data[name][0]
+
+    # note numpy.string_ is an instance of str in python2, bytes
+    # in python3
+    if isinstance(first, str) or (IS_PY3 and isinstance(first, bytes)):
+        if IS_PY3:
+            if isinstance(first, str):
+                fits_dtype = _table_npy2fits_form['U']
+            else:
+                fits_dtype = _table_npy2fits_form['S']
+        else:
+            fits_dtype = _table_npy2fits_form['S']
+    else:
+        arr0 = numpy.array(first, copy=False)
+        dtype0 = arr0.dtype
+        npy_dtype = dtype0.descr[0][1][1:]
+        if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
+            raise ValueError("Field '%s' is an arrays of strings, this is "
+                             "not allowed in variable length columns" % name)
+        if npy_dtype not in _table_npy2fits_form:
+            raise ValueError(
+                "Field '%s' has unsupported type '%s'" % (name, npy_dtype))
+        fits_dtype = _table_npy2fits_form[npy_dtype]
+
+    # 'Q' uses 64-bit addressing; we should try it at some point, but the
+    # cfitsio manual says it is experimental
+    # form = '1Q%s' % fits_dtype
+    form = '1P%s' % fits_dtype
+    dim = None
+
+    return form, dim
+
+
+def get_tile_dims(tile_dims, imshape):
+    """
+    Just make sure the tile dims has the appropriate number of dimensions
+    """
+
+    if tile_dims is None:
+        td = None
+    else:
+        td = numpy.array(tile_dims, dtype='i8')
+        nd = len(imshape)
+        if td.size != nd:
+            msg = "expected tile_dims to have %d dims, got %d" % (td.size, nd)
+            raise ValueError(msg)
+
+    return td
+
+
+def get_compress_type(compress):
+    if compress is not None:
+        compress = str(compress).upper()
+    if compress not in _compress_map:
+        raise ValueError(
+            "compress must be one of %s" % list(_compress_map.keys()))
+    return _compress_map[compress]
+
+
+def get_qmethod(qmethod):
+    if qmethod not in _qmethod_map:
+        if isinstance(qmethod, str):
+            qmethod = qmethod.upper()
+        elif isinstance(qmethod, bytes):
+            # in py27, bytes are str, so we can safely assume
+            # py3 here
+            qmethod = str(qmethod, 'ascii').upper()
+
+    if qmethod not in _qmethod_map:
+        raise ValueError(
+            "qmethod must be one of %s" % list(_qmethod_map.keys()))
+
+    return _qmethod_map[qmethod]
+
+
+def check_comptype_img(comptype, dtype_str):
+
+    if comptype == NOCOMPRESS:
+        return
+
+    # if dtype_str == 'i8':
+        # no i8 allowed for tile-compressed images
+    #    raise ValueError("8-byte integers not supported when "
+    #                     "using tile compression")
+
+    if comptype == PLIO_1:
+        # no unsigned u4/u8 for plio
+        if dtype_str == 'u4' or dtype_str == 'u8':
+            raise ValueError("Unsigned 4/8-byte integers currently not "
+                             "allowed when writing using PLIO "
+                             "tile compression")
+
+
+def _extract_table_type(type):
+    """
+    Get the numerical table type
+    """
+    if isinstance(type, str):
+        type = type.lower()
+        if type[0:7] == 'binary':
+            table_type = BINARY_TBL
+        elif type[0:6] == 'ascii':
+            table_type = ASCII_TBL
+        else:
+            raise ValueError(
+                "table type string should begin with 'binary' or 'ascii' "
+                "(case insensitive)")
+    else:
+        type = int(type)
+        if type not in [BINARY_TBL, ASCII_TBL]:
+            raise ValueError(
+                "table type num should be BINARY_TBL (%d) or "
+                "ASCII_TBL (%d)" % (BINARY_TBL, ASCII_TBL))
+        table_type = type
+
+    return table_type
+
+
+_compress_map = {
+    None: NOCOMPRESS,
+    'RICE': RICE_1,
+    'RICE_1': RICE_1,
+    'GZIP': GZIP_1,
+    'GZIP_1': GZIP_1,
+    'GZIP_2': GZIP_2,
+    'PLIO': PLIO_1,
+    'PLIO_1': PLIO_1,
+    'HCOMPRESS': HCOMPRESS_1,
+    'HCOMPRESS_1': HCOMPRESS_1,
+    NOCOMPRESS: None,
+    RICE_1: 'RICE_1',
+    GZIP_1: 'GZIP_1',
+    GZIP_2: 'GZIP_2',
+    PLIO_1: 'PLIO_1',
+    HCOMPRESS_1: 'HCOMPRESS_1',
+}
+
+_qmethod_map = {
+    None: NO_DITHER,
+    'NO_DITHER': NO_DITHER,
+    'SUBTRACTIVE_DITHER_1': SUBTRACTIVE_DITHER_1,
+    'SUBTRACTIVE_DITHER_2': SUBTRACTIVE_DITHER_2,
+    NO_DITHER: NO_DITHER,
+    SUBTRACTIVE_DITHER_1: SUBTRACTIVE_DITHER_1,
+    SUBTRACTIVE_DITHER_2: SUBTRACTIVE_DITHER_2,
+}
+
+_modeprint_map = {
+    'r': 'READONLY', 'rw': 'READWRITE', 0: 'READONLY', 1: 'READWRITE'}
+_char_modemap = {
+    'r': 'r', 'rw': 'rw',
+    READONLY: 'r', READWRITE: 'rw'}
+_int_modemap = {
+    'r': READONLY, 'rw': READWRITE, READONLY: READONLY, READWRITE: READWRITE}
diff --git a/fitsio/hdu/__init__.py b/fitsio/hdu/__init__.py
new file mode 100644 (file)
index 0000000..e949c75
--- /dev/null
@@ -0,0 +1,9 @@
+from .base import (  # noqa
+    ANY_HDU, BINARY_TBL, ASCII_TBL, IMAGE_HDU, _hdu_type_map)
+from .image import ImageHDU  # noqa
+from .table import (  # noqa
+    TableHDU,
+    AsciiTableHDU,
+    _table_npy2fits_form,
+    _npy2fits,
+)
diff --git a/fitsio/hdu/base.py b/fitsio/hdu/base.py
new file mode 100644 (file)
index 0000000..508dab5
--- /dev/null
@@ -0,0 +1,389 @@
+import copy
+import warnings
+
+from ..util import _stypes, _itypes, _ftypes, FITSRuntimeWarning
+from ..header import FITSHDR
+
+ANY_HDU = -1
+IMAGE_HDU = 0
+ASCII_TBL = 1
+BINARY_TBL = 2
+
+_hdu_type_map = {
+    IMAGE_HDU: 'IMAGE_HDU',
+    ASCII_TBL: 'ASCII_TBL',
+    BINARY_TBL: 'BINARY_TBL',
+    'IMAGE_HDU': IMAGE_HDU,
+    'ASCII_TBL': ASCII_TBL,
+    'BINARY_TBL': BINARY_TBL}
+
+
+class HDUBase(object):
+    """
+    A representation of a FITS HDU
+
+    parameters
+    ----------
+    fits: FITS object
+        An instance of a _fitsio_wrap.FITS object.  This is the low-level
+        python object, not the high-level FITS object defined in fitslib.
+    ext: integer
+        The extension number.
+    """
+    def __init__(self, fits, ext, **keys):
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        self._FITS = fits
+        self._ext = ext
+        self._ignore_scaling = False
+
+        self._update_info()
+        self._filename = self._FITS.filename()
+
+    @property
+    def ignore_scaling(self):
+        """
+        :return: Flag to indicate whether scaling (BZERO/BSCALE) values should
+        be ignored.
+        """
+        return self._ignore_scaling
+
+    @ignore_scaling.setter
+    def ignore_scaling(self, ignore_scaling_flag):
+        """
+        Set the flag to ignore scaling.
+        """
+        old_val = self._ignore_scaling
+        self._ignore_scaling = ignore_scaling_flag
+
+        # Only incur the overhead of updating the info if the new value is
+        # actually different.
+        if old_val != self._ignore_scaling:
+            self._update_info()
+
+    def get_extnum(self):
+        """
+        Get the extension number
+        """
+        return self._ext
+
+    def get_extname(self):
+        """
+        Get the name for this extension, can be an empty string
+        """
+        name = self._info['extname']
+        if name.strip() == '':
+            name = self._info['hduname']
+        return name.strip()
+
+    def get_extver(self):
+        """
+        Get the version for this extension.
+
+        Used when a name is given to multiple extensions
+        """
+        ver = self._info['extver']
+        if ver == 0:
+            ver = self._info['hduver']
+        return ver
+
+    def get_exttype(self, num=False):
+        """
+        Get the extension type
+
+        By default the result is a string that mirrors
+        the enumerated type names in cfitsio
+            'IMAGE_HDU', 'ASCII_TBL', 'BINARY_TBL'
+        which have the numeric values 0, 1 and 2.
+        Send num=True to get the numbers.  The values
+            fitsio.IMAGE_HDU, fitsio.ASCII_TBL and fitsio.BINARY_TBL
+        are available for comparison.
+
+        parameters
+        ----------
+        num: bool, optional
+            Return the numeric values.
+        """
+        if num:
+            return self._info['hdutype']
+        else:
+            name = _hdu_type_map[self._info['hdutype']]
+            return name
+
+    def get_offsets(self):
+        """
+        returns
+        -------
+        a dictionary with these entries
+
+        header_start:
+            byte offset from beginning of the file to the start
+            of the header
+        data_start:
+            byte offset from beginning of the file to the start
+            of the data section
+        data_end:
+            byte offset from beginning of the file to the end
+            of the data section
+
+        Note these are also in the information dictionary, which
+        you can access with get_info()
+        """
+        return dict(
+            header_start=self._info['header_start'],
+            data_start=self._info['data_start'],
+            data_end=self._info['data_end'],
+        )
+
+    def get_info(self):
+        """
+        Get a copy of the internal dictionary holding extension information
+        """
+        return copy.deepcopy(self._info)
+
+    def get_filename(self):
+        """
+        Get a copy of the filename for this fits file
+        """
+        return copy.copy(self._filename)
+
+    def write_checksum(self):
+        """
+        Write the checksum into the header for this HDU.
+
+        Computes the checksum for the HDU, both the data portion alone (DATASUM
+        keyword) and the checksum complement for the entire HDU (CHECKSUM).
+
+        returns
+        -------
+        A dict with keys 'datasum' and 'hdusum'
+        """
+        return self._FITS.write_checksum(self._ext+1)
+
+    def verify_checksum(self):
+        """
+        Verify the checksum in the header for this HDU.
+        """
+        res = self._FITS.verify_checksum(self._ext+1)
+        if res['dataok'] != 1:
+            raise ValueError("data checksum failed")
+        if res['hduok'] != 1:
+            raise ValueError("hdu checksum failed")
+
+    def write_comment(self, comment):
+        """
+        Write a comment into the header
+        """
+        self._FITS.write_comment(self._ext+1, str(comment))
+
+    def write_history(self, history):
+        """
+        Write history text into the header
+        """
+        self._FITS.write_history(self._ext+1, str(history))
+
+    def _write_continue(self, value):
+        """
+        Write a CONTINUE card into the header
+        """
+        self._FITS.write_continue(self._ext+1, str(value))
+
+    def write_key(self, name, value, comment=""):
+        """
+        Write the input value to the header
+
+        parameters
+        ----------
+        name: string
+            Name of keyword to write/update
+        value: scalar
+            Value to write, can be string float or integer type,
+            including numpy scalar types.
+        comment: string, optional
+            An optional comment to write for this key
+
+        Notes
+        -----
+        Write COMMENT and HISTORY using the write_comment and write_history
+        methods
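+
+        A minimal sketch, assuming `hdu` is an open HDU; the keyword names
+        are illustrative:
+
+            hdu.write_key('EXPTIME', 30.0, comment='exposure time (s)')
+            hdu.write_key('OBJECT', 'M31')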
+        """
+
+        if name is None:
+
+            # we write a blank keyword and the rest is a comment
+            # string
+
+            if not isinstance(comment, _stypes):
+                raise ValueError('when writing blank key the value '
+                                 'must be a string')
+
+            # this might be longer than 80 but that's ok, the routine
+            # will take care of it
+            # card = '         ' + str(comment)
+            card = '        ' + str(comment)
+            self._FITS.write_record(
+                self._ext+1,
+                card,
+            )
+
+        elif value is None:
+            self._FITS.write_undefined_key(self._ext+1,
+                                           str(name),
+                                           str(comment))
+
+        elif isinstance(value, bool):
+            if value:
+                v = 1
+            else:
+                v = 0
+            self._FITS.write_logical_key(self._ext+1,
+                                         str(name),
+                                         v,
+                                         str(comment))
+        elif isinstance(value, _stypes):
+            self._FITS.write_string_key(self._ext+1,
+                                        str(name),
+                                        str(value),
+                                        str(comment))
+        elif isinstance(value, _ftypes):
+            self._FITS.write_double_key(self._ext+1,
+                                        str(name),
+                                        float(value),
+                                        str(comment))
+        elif isinstance(value, _itypes):
+            self._FITS.write_long_long_key(self._ext+1,
+                                           str(name),
+                                           int(value),
+                                           str(comment))
+        elif isinstance(value, (tuple, list)):
+            vl = [str(el) for el in value]
+            sval = ','.join(vl)
+            self._FITS.write_string_key(self._ext+1,
+                                        str(name),
+                                        sval,
+                                        str(comment))
+        else:
+            sval = str(value)
+            mess = (
+                "warning, keyword '%s' has non-standard "
+                "value type %s, "
+                "Converting to string: '%s'")
+            warnings.warn(mess % (name, type(value), sval), FITSRuntimeWarning)
+            self._FITS.write_string_key(self._ext+1,
+                                        str(name),
+                                        sval,
+                                        str(comment))
+
+    def write_keys(self, records_in, clean=True):
+        """
+        Write the keywords to the header.
+
+        parameters
+        ----------
+        records_in: FITSHDR or list or dict
+            Can be one of these:
+                - FITSHDR object
+                - list of dictionaries containing 'name','value' and optionally
+                  a 'comment' field; the order is preserved.
+                - a dictionary of keyword-value pairs; no comments are written
+                  in this case, and the order is arbitrary.
+        clean: boolean
+            If True, trim out the standard fits header keywords that are
+            created on HDU creation, such as EXTEND, SIMPLE, TTYPE, TFORM,
+            TDIM, XTENSION, BITPIX, NAXIS, etc.
+
+        Notes
+        -----
+        Input keys named COMMENT and HISTORY are written using the
+        write_comment and write_history methods.
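+
+        A minimal sketch, assuming `hdu` is an open HDU; the keywords are
+        illustrative:
+
+            hdu.write_keys([{'name': 'EXPTIME', 'value': 30.0,
+                             'comment': 'exposure time (s)'},
+                            {'name': 'FILTER', 'value': 'r'}])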
+        """
+
+        if isinstance(records_in, FITSHDR):
+            hdr = records_in
+        else:
+            hdr = FITSHDR(records_in)
+
+        if clean:
+            is_table = hasattr(self, '_table_type_str')
+            # is_table = isinstance(self, TableHDU)
+            hdr.clean(is_table=is_table)
+
+        for r in hdr.records():
+            name = r['name']
+            if name is not None:
+                name = name.upper()
+
+            value = r['value']
+
+            if name == 'COMMENT':
+                self.write_comment(value)
+            elif name == 'HISTORY':
+                self.write_history(value)
+            elif name == 'CONTINUE':
+                self._write_continue(value)
+            else:
+                comment = r.get('comment', '')
+                self.write_key(name, value, comment=comment)
+
+    def read_header(self):
+        """
+        Read the header as a FITSHDR
+
+        The FITSHDR allows access to the values and comments by name and
+        number.
+        """
+        # note converting strings
+        return FITSHDR(self.read_header_list())
+
+    def read_header_list(self):
+        """
+        Read the header as a list of dictionaries.
+
+        You will usually use read_header instead, which just sends the output
+        of this function to the constructor of a FITSHDR, which allows access
+        to the values and comments by name and number.
+
+        Each dictionary is
+            'name': the keyword name
+            'value': the value field as a string
+            'comment': the comment field as a string.
+        """
+        return self._FITS.read_header(self._ext+1)
+
+    def _update_info(self):
+        """
+        Update metadata for this HDU
+        """
+        try:
+            self._FITS.movabs_hdu(self._ext+1)
+        except IOError:
+            raise RuntimeError("no such hdu")
+
+        self._info = self._FITS.get_hdu_info(self._ext+1, self._ignore_scaling)
+
+    def _get_repr_list(self):
+        """
+        Get some representation data common to all HDU types
+        """
+        spacing = ' '*2
+        text = ['']
+        text.append("%sfile: %s" % (spacing, self._filename))
+        text.append("%sextension: %d" % (spacing, self._info['hdunum']-1))
+        text.append(
+            "%stype: %s" % (spacing, _hdu_type_map[self._info['hdutype']]))
+
+        extname = self.get_extname()
+        if extname != "":
+            text.append("%sextname: %s" % (spacing, extname))
+        extver = self.get_extver()
+        if extver != 0:
+            text.append("%sextver: %s" % (spacing, extver))
+
+        return text, spacing
diff --git a/fitsio/hdu/image.py b/fitsio/hdu/image.py
new file mode 100644 (file)
index 0000000..8c06bcf
--- /dev/null
@@ -0,0 +1,439 @@
+"""
+image HDU classes for fitslib, part of the fitsio package.
+
+See the main docs at https://github.com/esheldon/fitsio
+
+  Copyright (C) 2011  Erin Sheldon, BNL.  erin dot sheldon at gmail dot com
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""
+from __future__ import with_statement, print_function
+from functools import reduce
+
+import numpy
+
+from math import floor
+from .base import HDUBase, IMAGE_HDU
+from ..util import IS_PY3, array_to_native
+
+# for python3 compat
+if IS_PY3:
+    xrange = range
+
+
+class ImageHDU(HDUBase):
+    def _update_info(self):
+        """
+        Call parent method and make sure this is in fact an
+        image HDU.  Set dims in C order
+        """
+        super(ImageHDU, self)._update_info()
+
+        if self._info['hdutype'] != IMAGE_HDU:
+            mess = "Extension %s is not an Image HDU" % self._ext
+            raise ValueError(mess)
+
+        # convert to c order
+        if 'dims' in self._info:
+            self._info['dims'] = list(reversed(self._info['dims']))
+
+    def has_data(self):
+        """
+        Determine if this HDU has any data
+
+        For images, check that the dimensions are not zero.
+
+        For tables, check that the row count is not zero
+        """
+        return self._info.get('ndims', 0) != 0
+
+    def is_compressed(self):
+        """
+        Returns True if this extension is compressed
+        """
+        return self._info['is_compressed_image'] == 1
+
+    def get_comptype(self):
+        """
+        Get the compression type.
+
+        None if the image is not compressed.
+        """
+        return self._info['comptype']
+
+    def get_dims(self):
+        """
+        get the shape of the image.  Returns () for empty
+        """
+        if self._info['ndims'] != 0:
+            dims = self._info['dims']
+        else:
+            dims = ()
+
+        return dims
+
+    def reshape(self, dims):
+        """
+        reshape an existing image to the requested dimensions
+
+        parameters
+        ----------
+        dims: sequence
+            Any sequence convertible to i8
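+
+        A minimal sketch, assuming `hdu` is an open image HDU; the shape
+        is illustrative:
+
+            hdu.reshape([200, 100])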
+        """
+
+        adims = numpy.array(dims, ndmin=1, dtype='i8')
+        self._FITS.reshape_image(self._ext+1, adims)
+
+    def write(self, img, start=0, **keys):
+        """
+        Write the image into this HDU
+
+        If data already exist in this HDU, they will be overwritten.  If the
+        image to write is larger than the image on disk, or if the start
+        position is such that the write would extend beyond the existing
+        dimensions, the on-disk image is expanded.
+
+        parameters
+        ----------
+        img: ndarray
+            A simple numpy ndarray
+        start: integer or sequence
+            Where to start writing data.  Can be an integer offset
+            into the entire array, or a sequence determining where
+            in N-dimensional space to start.
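+
+        A minimal sketch writing into a sub-region, assuming `hdu` is an
+        open image HDU; the shapes and offsets are illustrative:
+
+            import numpy
+            hdu.write(numpy.ones((2, 3)), start=[10, 20])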
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        dims = self.get_dims()
+
+        if img.dtype.fields is not None:
+            raise ValueError("got recarray, expected regular ndarray")
+        if img.size == 0:
+            raise ValueError("data must have at least 1 row")
+
+        # data must be c-contiguous and native byte order
+        if not img.flags['C_CONTIGUOUS']:
+            # this always makes a copy
+            img_send = numpy.ascontiguousarray(img)
+            array_to_native(img_send, inplace=True)
+        else:
+            img_send = array_to_native(img, inplace=False)
+
+        if IS_PY3 and img_send.dtype.char == 'U':
+            # for python3, we convert unicode to ascii
+            # this will error if the character is not in ascii
+            img_send = img_send.astype('S', copy=False)
+
+        if not numpy.isscalar(start):
+            # convert to scalar offset
+            # note we use the on-disk data type to get itemsize
+
+            offset = _convert_full_start_to_offset(dims, start)
+        else:
+            offset = start
+
+        # see if we need to resize the image
+        if self.has_data():
+            self._expand_if_needed(dims, img.shape, start, offset)
+
+        self._FITS.write_image(self._ext+1, img_send, offset+1)
+        self._update_info()
+
+    def read(self, **keys):
+        """
+        Read the image.
+
+        If the HDU is an IMAGE_HDU, read the corresponding image.  Compression
+        and scaling are dealt with properly.
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if not self.has_data():
+            return None
+
+        dtype, shape = self._get_dtype_and_shape()
+        array = numpy.zeros(shape, dtype=dtype)
+        self._FITS.read_image(self._ext+1, array)
+        return array
+
+    def _get_dtype_and_shape(self):
+        """
+        Get the numpy dtype and shape for image
+        """
+        npy_dtype = self._get_image_numpy_dtype()
+
+        if self._info['ndims'] != 0:
+            shape = self._info['dims']
+        else:
+            raise IOError("no image present in HDU")
+
+        return npy_dtype, shape
+
+    def _get_image_numpy_dtype(self):
+        """
+        Get the numpy dtype for the image
+        """
+        try:
+            ftype = self._info['img_equiv_type']
+            npy_type = _image_bitpix2npy[ftype]
+        except KeyError:
+            raise KeyError("unsupported fits data type: %d" % ftype)
+
+        return npy_type
+
+    def __getitem__(self, arg):
+        """
+        Get data from an image using python [] slice notation.
+
+        e.g., [2:25, 4:45].
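+
+        for example, with `hdu` standing for a two-dimensional image HDU:
+
+            >>> data = hdu[2:25, 4:45]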
+        """
+        return self._read_image_slice(arg)
+
+    def _read_image_slice(self, arg):
+        """
+        workhorse to read a slice
+        """
+        if 'ndims' not in self._info:
+            raise ValueError("Attempt to slice empty extension")
+
+        if isinstance(arg, slice):
+            # one-dimensional, e.g. 2:20
+            return self._read_image_slice((arg,))
+
+        if not isinstance(arg, tuple):
+            raise ValueError("arguments must be slices, one for each "
+                             "dimension, e.g. [2:5] or [2:5,8:25] etc.")
+
+        # should be a tuple of slices, one for each dimension
+        # e.g. [2:3, 8:100]
+        nd = len(arg)
+        if nd != self._info['ndims']:
+            raise ValueError("Got slice dimensions %d, "
+                             "expected %d" % (nd, self._info['ndims']))
+
+        targ = arg
+        arg = []
+        for a in targ:
+            if isinstance(a, slice):
+                arg.append(a)
+            elif isinstance(a, int):
+                arg.append(slice(a, a+1, 1))
+            else:
+                raise ValueError("arguments must be slices, e.g. 2:12")
+
+        dims = self._info['dims']
+        arrdims = []
+        first = []
+        last = []
+        steps = []
+        npy_dtype = self._get_image_numpy_dtype()
+
+        # check the args and reverse dimensions since
+        # fits is backwards from numpy
+        dim = 0
+        for slc in arg:
+            start = slc.start
+            stop = slc.stop
+            step = slc.step
+
+            if start is None:
+                start = 0
+            if stop is None:
+                stop = dims[dim]
+            if step is None:
+                # Ensure sane defaults.
+                if start <= stop:
+                    step = 1
+                else:
+                    step = -1
+
+            # Sanity checks for proper syntax.
+            if (step > 0 and stop < start) or (step < 0 and start < stop):
+                return numpy.empty(0, dtype=npy_dtype)
+            if start < 0:
+                start = dims[dim] + start
+                if start < 0:
+                    raise IndexError("Index out of bounds")
+
+            if stop < 0:
+                # negative stop counts from the end, as in a python slice
+                stop = dims[dim] + stop
+
+            # move to 1-offset
+            start = start + 1
+
+            if stop > dims[dim]:
+                stop = dims[dim]
+            if stop < start:
+                # A little black magic here.  The stop is offset by 2 to
+                # accommodate the 1-offset of CFITSIO, and to move past the end
+                # pixel to get the complete set after it is flipped along the
+                # axis.  Maybe there is a clearer way to accomplish what this
+                # offset is glossing over.
+                # @at88mph 2019.10.10
+                stop = stop + 2
+
+            first.append(start)
+            last.append(stop)
+
+            # Negative step values are not used in CFITSIO as the dimension is
+            # already properly calculated.
+            # @at88mph 2019.10.21
+            steps.append(abs(step))
+            arrdims.append(int(floor((stop - start) / step)) + 1)
+
+            dim += 1
+
+        first.reverse()
+        last.reverse()
+        steps.reverse()
+        first = numpy.array(first, dtype='i8')
+        last = numpy.array(last, dtype='i8')
+        steps = numpy.array(steps, dtype='i8')
+
+        array = numpy.zeros(arrdims, dtype=npy_dtype)
+        self._FITS.read_image_slice(self._ext+1, first, last, steps,
+                                    self._ignore_scaling, array)
+        return array
+
+    def _expand_if_needed(self, dims, write_dims, start, offset):
+        """
+        expand the on-disk image if the intended write will extend
+        beyond the existing dimensions
+        """
+        from operator import mul
+
+        start_is_scalar = numpy.isscalar(start)
+
+        existing_size = reduce(mul, dims, 1)
+        required_size = offset + reduce(mul, write_dims, 1)
+
+        if required_size > existing_size:
+            # we need to expand the image
+            ndim = len(dims)
+            idim = len(write_dims)
+
+            if start_is_scalar:
+                if start == 0:
+                    start = [0]*ndim
+                else:
+                    raise ValueError(
+                        "When expanding "
+                        "an existing image while writing, the start keyword "
+                        "must have the same number of dimensions "
+                        "as the image or be exactly 0, got %s " % start)
+
+            if idim != ndim:
+                raise ValueError(
+                    "When expanding "
+                    "an existing image while writing, the input image "
+                    "must have the same number of dimensions "
+                    "as the original.  "
+                    "Got %d instead of %d" % (idim, ndim))
+            new_dims = []
+            for i in xrange(ndim):
+                required_dim = start[i] + write_dims[i]
+
+                if required_dim < dims[i]:
+                    # careful not to shrink the image!
+                    dimsize = dims[i]
+                else:
+                    dimsize = required_dim
+
+                new_dims.append(dimsize)
+
+            self.reshape(new_dims)
+
+    def __repr__(self):
+        """
+        Representation for ImageHDU
+        """
+        text, spacing = self._get_repr_list()
+        text.append("%simage info:" % spacing)
+        cspacing = ' '*4
+
+        # need this check for when we haven't written data yet
+        if 'ndims' in self._info:
+            if self._info['comptype'] is not None:
+                text.append(
+                    "%scompression: %s" % (cspacing, self._info['comptype']))
+
+            if self._info['ndims'] != 0:
+                dimstr = [str(d) for d in self._info['dims']]
+                dimstr = ",".join(dimstr)
+            else:
+                dimstr = ''
+
+            dt = _image_bitpix2npy[self._info['img_equiv_type']]
+            text.append("%sdata type: %s" % (cspacing, dt))
+            text.append("%sdims: [%s]" % (cspacing, dimstr))
+
+        text = '\n'.join(text)
+        return text
+
+
+def _convert_full_start_to_offset(dims, start):
+    # convert to scalar offset
+    # note we use the on-disk data type to get itemsize
+    ndim = len(dims)
+
+    # convert sequence to pixel start
+    if len(start) != ndim:
+        m = "start has len %d, which does not match requested dims %d"
+        raise ValueError(m % (len(start), ndim))
+
+    # this is really strides / itemsize
+    strides = [1]
+    for i in xrange(1, ndim):
+        strides.append(strides[i-1] * dims[ndim-i])
+
+    strides.reverse()
+    s = start
+    start_index = sum([s[i]*strides[i] for i in xrange(ndim)])
+
+    return start_index
+
+
+# remember, you should be using the equivalent image type for this
+_image_bitpix2npy = {
+    8: 'u1',
+    10: 'i1',
+    16: 'i2',
+    20: 'u2',
+    32: 'i4',
+    40: 'u4',
+    64: 'i8',
+    -32: 'f4',
+    -64: 'f8'}
diff --git a/fitsio/hdu/table.py b/fitsio/hdu/table.py
new file mode 100644 (file)
index 0000000..30c85a0
--- /dev/null
@@ -0,0 +1,2496 @@
+"""
+table HDU classes for fitslib, part of the fitsio package.
+
+See the main docs at https://github.com/esheldon/fitsio
+
+  Copyright (C) 2011  Erin Sheldon, BNL.  erin dot sheldon at gmail dot com
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""
+from __future__ import with_statement, print_function
+import copy
+import warnings
+from functools import reduce
+
+import numpy
+
+from ..util import (
+    IS_PY3,
+    isstring,
+    isinteger,
+    is_object,
+    fields_are_object,
+    array_to_native,
+    array_to_native_c,
+    FITSRuntimeWarning,
+    mks
+)
+from .base import HDUBase, ASCII_TBL, IMAGE_HDU, _hdu_type_map
+
+# for python3 compat
+if IS_PY3:
+    xrange = range
+
+
+class TableHDU(HDUBase):
+    """
+    A table HDU
+
+    parameters
+    ----------
+    fits: FITS object
+        An instance of a _fitsio_wrap.FITS object.  This is the low-level
+        python object, not the FITS object defined above.
+    ext: integer
+        The extension number.
+    lower: bool, optional
+        If True, force all column names to lower case in output
+    upper: bool, optional
+        If True, force all column names to upper case in output
+    trim_strings: bool, optional
+        If True, trim trailing spaces from strings. Default is False.
+    vstorage: string, optional
+        Set the default method to store variable length columns.  Can be
+        'fixed' or 'object'.  See docs on fitsio.FITS for details.
+    case_sensitive: bool, optional
+        Match column names and extension names with case-sensitivity.  Default
+        is False.
+    iter_row_buffer: integer
+        Number of rows to buffer when iterating over table HDUs.
+        Default is 1.
+    write_bitcols: bool, optional
+        If True, write logicals as a bit column. Default is False.
+    """
+    def __init__(self, fits, ext,
+                 lower=False, upper=False, trim_strings=False,
+                 vstorage='fixed', case_sensitive=False, iter_row_buffer=1,
+                 write_bitcols=False, **keys):
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        # NOTE: The defaults of False above cannot be changed since they
+        # are or'ed with the method defaults below.
+        super(TableHDU, self).__init__(fits, ext)
+
+        self.lower = lower
+        self.upper = upper
+        self.trim_strings = trim_strings
+
+        self._vstorage = vstorage
+        self.case_sensitive = case_sensitive
+        self._iter_row_buffer = iter_row_buffer
+        self.write_bitcols = write_bitcols
+
+        if self._info['hdutype'] == ASCII_TBL:
+            self._table_type_str = 'ascii'
+        else:
+            self._table_type_str = 'binary'
+
+    def get_nrows(self):
+        """
+        Get number of rows in the table.
+        """
+        nrows = self._info.get('nrows', None)
+        if nrows is None:
+            raise ValueError("nrows not in info table; this is a bug")
+        return nrows
+
+    def get_colnames(self):
+        """
+        Get a copy of the column names for a table HDU
+        """
+        return copy.copy(self._colnames)
+
+    def get_colname(self, colnum):
+        """
+        Get the name associated with the given column number
+
+        parameters
+        ----------
+        colnum: integer
+            The number for the column, zero offset
+        """
+        if colnum < 0 or colnum > (len(self._colnames)-1):
+            raise ValueError(
+                "colnum out of range [0,%s-1]" % (0, len(self._colnames)))
+        return self._colnames[colnum]
+
+    def get_vstorage(self):
+        """
+        Get a string representing the storage method for variable length
+        columns
+        """
+        return copy.copy(self._vstorage)
+
+    def has_data(self):
+        """
+        Determine if this HDU has any data
+
+        Check that the row count is not zero
+        """
+        if self._info['nrows'] > 0:
+            return True
+        else:
+            return False
+
+    def where(self, expression):
+        """
+        Return the indices where the expression evaluates to true.
+
+        parameters
+        ----------
+        expression: string
+            A fits row selection expression.  E.g.
+            "x > 3 && y < 5"
+        """
+        return self._FITS.where(self._ext+1, expression)
+
+    def write(self, data, firstrow=0, columns=None, names=None, slow=False,
+              **keys):
+        """
+        Write data into this HDU
+
+        parameters
+        ----------
+        data: ndarray or list of ndarray
+            A numerical python array.  Should be an ordinary array for image
+            HDUs, should have fields for tables.  To write an ordinary array to
+            a column in a table HDU, use write_column.  If data already exists
+            in this HDU, it will be overwritten.  See the append() method to
+            append new rows to a table HDU.
+        firstrow: integer, optional
+            At which row you should begin writing to tables.  Be sure you know
+            what you are doing!  For appending see the append() method.
+            Default 0.
+        columns: list, optional
+            If data is a list of arrays, you must send columns as a list
+            of names or column numbers. You can also use the `names` keyword
+            argument.
+        names: list, optional
+            If data is a list of arrays, you must send columns as a list
+            of names or column numbers. You can also use the `columns` keyword
+            argument.
+        slow: bool, optional
+            If True, use a slower method to write one column at a time. Useful
+            for debugging.
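+
+        example
+        -------
+        a minimal sketch; the file and column names are placeholders:
+
+            >>> import numpy
+            >>> data = numpy.zeros(5, dtype=[('x', 'f8'), ('y', 'i4')])
+            >>> with fitsio.FITS('data.fits', 'rw') as fits:
+            ...     fits['mytable'].write(data)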
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        isrec = False
+        if isinstance(data, (list, dict)):
+            if isinstance(data, list):
+                data_list = data
+                if columns is not None:
+                    columns_all = columns
+                elif names is not None:
+                    columns_all = names
+                else:
+                    raise ValueError(
+                        "you must send `columns` or `names` "
+                        "with a list of arrays")
+            else:
+                columns_all = list(data.keys())
+                data_list = [data[n] for n in columns_all]
+
+            colnums_all = [self._extract_colnum(c) for c in columns_all]
+            names = [self.get_colname(c) for c in colnums_all]
+
+            isobj = numpy.zeros(len(data_list), dtype=bool)
+            for i in xrange(len(data_list)):
+                isobj[i] = is_object(data_list[i])
+
+        else:
+            if data.dtype.fields is None:
+                raise ValueError("You are writing to a table, so I expected "
+                                 "an array with fields as input. If you want "
+                                 "to write a simple array, you should use "
+                                 "write_column to write to a single column, "
+                                 "or instead write to an image hdu")
+
+            if data.shape == ():
+                raise ValueError("cannot write data with shape ()")
+
+            isrec = True
+            names = data.dtype.names
+            # only write object types (variable-length columns) after
+            # writing the main table
+            isobj = fields_are_object(data)
+
+            data_list = []
+            colnums_all = []
+            for i, name in enumerate(names):
+                colnum = self._extract_colnum(name)
+                data_list.append(data[name])
+                colnums_all.append(colnum)
+
+        if slow:
+            for i, name in enumerate(names):
+                if not isobj[i]:
+                    self.write_column(name, data_list[i], firstrow=firstrow)
+        else:
+
+            nonobj_colnums = []
+            nonobj_arrays = []
+            for i in xrange(len(data_list)):
+                if not isobj[i]:
+                    nonobj_colnums.append(colnums_all[i])
+                    if isrec:
+                        # this still leaves possibility of f-order sub-arrays..
+                        colref = array_to_native(data_list[i], inplace=False)
+                    else:
+                        colref = array_to_native_c(data_list[i], inplace=False)
+
+                    if IS_PY3 and colref.dtype.char == 'U':
+                        # for python3, we convert unicode to ascii
+                        # this will error if the character is not in ascii
+                        colref = colref.astype('S', copy=False)
+
+                    nonobj_arrays.append(colref)
+
+            for tcolnum, tdata in zip(nonobj_colnums, nonobj_arrays):
+                self._verify_column_data(tcolnum, tdata)
+
+            if len(nonobj_arrays) > 0:
+                self._FITS.write_columns(
+                    self._ext+1, nonobj_colnums, nonobj_arrays,
+                    firstrow=firstrow+1, write_bitcols=self.write_bitcols)
+
+        # writing the object arrays always occurs the same way
+        # need to make sure this works for array fields
+        for i, name in enumerate(names):
+            if isobj[i]:
+                self.write_var_column(name, data_list[i], firstrow=firstrow)
+
+        self._update_info()
+
+    def write_column(self, column, data, firstrow=0, **keys):
+        """
+        Write data to a column in this HDU
+
+        This HDU must be a table HDU.
+
+        parameters
+        ----------
+        column: scalar string/integer
+            The column in which to write.  Can be the name or number (0 offset)
+        data: ndarray
+            Numerical python array to write.  This should match the
+            shape of the column.  You are probably better using
+            fits.write_table() to be sure.
+        firstrow: integer, optional
+            At which row you should begin writing.  Be sure you know what you
+            are doing!  For appending see the append() method.  Default 0.
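+
+        example
+        -------
+        a sketch; assumes this table has a 5-row 'f8' column named 'x':
+
+            >>> import numpy
+            >>> xdata = numpy.arange(5, dtype='f8')
+            >>> hdu.write_column('x', xdata)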
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        colnum = self._extract_colnum(column)
+
+        # need it to be contiguous and native byte order.  For now, make a
+        # copy.  but we may be able to avoid this with some care.
+
+        if not data.flags['C_CONTIGUOUS']:
+            # this always makes a copy
+            data_send = numpy.ascontiguousarray(data)
+            # this is a copy, we can make sure it is native
+            # and modify in place if needed
+            array_to_native(data_send, inplace=True)
+        else:
+            # we can avoid the copy with a try-finally block and
+            # some logic
+            data_send = array_to_native(data, inplace=False)
+
+        if IS_PY3 and data_send.dtype.char == 'U':
+            # for python3, we convert unicode to ascii
+            # this will error if the character is not in ascii
+            data_send = data_send.astype('S', copy=False)
+
+        self._verify_column_data(colnum, data_send)
+
+        self._FITS.write_columns(
+            self._ext+1,
+            [colnum],
+            [data_send],
+            firstrow=firstrow+1,
+            write_bitcols=self.write_bitcols,
+        )
+
+        del data_send
+        self._update_info()
+
+    def _verify_column_data(self, colnum, data):
+        """
+        verify the input data is of the correct type and shape
+        """
+        this_dt = data.dtype.descr[0]
+
+        if len(data.shape) > 2:
+            this_shape = data.shape[1:]
+        elif len(data.shape) == 2 and data.shape[1] > 1:
+            this_shape = data.shape[1:]
+        else:
+            this_shape = ()
+
+        this_npy_type = this_dt[1][1:]
+
+        npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+        info = self._info['colinfo'][colnum]
+
+        if npy_type[0] in ['>', '<', '|']:
+            npy_type = npy_type[1:]
+
+        col_name = info['name']
+        col_tdim = info['tdim']
+        col_shape = _tdim2shape(
+            col_tdim, col_name, is_string=(npy_type[0] == 'S'))
+
+        if col_shape is None:
+            if this_shape == ():
+                this_shape = None
+
+        if col_shape is not None and not isinstance(col_shape, tuple):
+            col_shape = (col_shape,)
+
+        """
+        print('column name:',col_name)
+        print(data.shape)
+        print('col tdim', info['tdim'])
+        print('column dtype:',npy_type)
+        print('input dtype:',this_npy_type)
+        print('column shape:',col_shape)
+        print('input shape:',this_shape)
+        print()
+        """
+
+        # this mismatch is OK
+        if npy_type == 'i1' and this_npy_type == 'b1':
+            this_npy_type = 'i1'
+
+        if isinstance(self, AsciiTableHDU):
+            # we don't enforce types exact for ascii
+            if npy_type == 'i8' and this_npy_type in ['i2', 'i4']:
+                this_npy_type = 'i8'
+            elif npy_type == 'f8' and this_npy_type == 'f4':
+                this_npy_type = 'f8'
+
+        if this_npy_type != npy_type:
+            raise ValueError(
+                "bad input data for column '%s': "
+                "expected '%s', got '%s'" % (
+                    col_name, npy_type, this_npy_type))
+
+        if this_shape != col_shape:
+            raise ValueError(
+                "bad input shape for column '%s': "
+                "expected '%s', got '%s'" % (col_name, col_shape, this_shape))
+
+    def write_var_column(self, column, data, firstrow=0, **keys):
+        """
+        Write data to a variable-length column in this HDU
+
+        This HDU must be a table HDU.
+
+        parameters
+        ----------
+        column: scalar string/integer
+            The column in which to write.  Can be the name or number (0 offset)
+        column: ndarray
+            Numerical python array to write.  This must be an object array.
+        firstrow: integer, optional
+            At which row you should begin writing.  Be sure you know what you
+            are doing!  For appending see the append() method.  Default 0.
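+
+        example
+        -------
+        a sketch; 'vcol' stands for a variable-length column.  Each
+        element of the object array can have a different length:
+
+            >>> import numpy
+            >>> vdata = numpy.empty(3, dtype='O')
+            >>> vdata[0] = [1, 2]
+            >>> vdata[1] = [3]
+            >>> vdata[2] = [4, 5, 6]
+            >>> hdu.write_var_column('vcol', vdata)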
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if not is_object(data):
+            raise ValueError("Only object fields can be written to "
+                             "variable-length arrays")
+        colnum = self._extract_colnum(column)
+
+        self._FITS.write_var_column(self._ext+1, colnum+1, data,
+                                    firstrow=firstrow+1)
+        self._update_info()
+
+    def insert_column(self, name, data, colnum=None, write_bitcols=None,
+                      **keys):
+        """
+        Insert a new column.
+
+        parameters
+        ----------
+        name: string
+            The column name
+        data:
+            The data to write into the new column.
+        colnum: int, optional
+            The column number for the new column, zero-offset.  Default
+            is to add the new column after the existing ones.
+        write_bitcols: bool, optional
+            If set, write logical as bit cols. This can over-ride the
+            internal class setting. Default of None respects the inner
+            class setting.
+
+        Notes
+        -----
+        This method is used unmodified by ascii tables as well.
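+
+        example
+        -------
+        a sketch adding a hypothetical 'weight' column sized to the
+        current table:
+
+            >>> import numpy
+            >>> wt = numpy.ones(hdu.get_nrows(), dtype='f8')
+            >>> hdu.insert_column('weight', wt)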
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if write_bitcols is None:
+            write_bitcols = self.write_bitcols
+
+        if name in self._colnames:
+            raise ValueError("column '%s' already exists" % name)
+
+        if IS_PY3 and data.dtype.char == 'U':
+            # fast dtype conversion using an empty array
+            # we could hack at the actual text description, but using
+            # the numpy API is probably safer
+            # this also avoids doing a dtype conversion on every array
+            # element which could be expensive
+            descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
+        else:
+            descr = data.dtype.descr
+
+        if len(descr) > 1:
+            raise ValueError("you can only insert a single column, "
+                             "requested: %s" % descr)
+
+        this_descr = descr[0]
+        this_descr = [name, this_descr[1]]
+        if len(data.shape) > 1:
+            this_descr += [data.shape[1:]]
+        this_descr = tuple(this_descr)
+
+        name, fmt, dims = _npy2fits(
+            this_descr,
+            table_type=self._table_type_str,
+            write_bitcols=write_bitcols,
+        )
+        if dims is not None:
+            dims = [dims]
+
+        if colnum is None:
+            new_colnum = len(self._info['colinfo']) + 1
+        else:
+            new_colnum = colnum+1
+
+        self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
+
+        self._update_info()
+
+        self.write_column(name, data)
+
+    def append(self, data, columns=None, names=None, **keys):
+        """
+        Append new rows to a table HDU
+
+        parameters
+        ----------
+        data: ndarray or list of arrays
+            A numerical python array with fields (recarray) or a list of
+            arrays.  Should have the same fields as the existing table. If only
+            a subset of the table columns are present, the other columns are
+            filled with zeros.
+        columns: list, optional
+            If data is a list of arrays, you must send columns as a list
+            of names or column numbers. You can also use the `names` keyword
+            argument.
+        names: list, optional
+            If data is a list of arrays, you must send columns as a list
+            of names or column numbers. You can also use the `columns` keyword
+            argument.
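+
+        example
+        -------
+        a sketch; `data` stands for an array with the same columns as
+        the existing table:
+
+            >>> with fitsio.FITS('data.fits', 'rw') as fits:
+            ...     fits['mytable'].append(data)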
+        """
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        firstrow = self._info['nrows']
+        self.write(data, firstrow=firstrow, columns=columns, names=names)
+
+    def delete_rows(self, rows):
+        """
+        Delete rows from the table
+
+        parameters
+        ----------
+        rows: sequence or slice
+            The exact rows to delete as a sequence, or a slice.
+
+        examples
+        --------
+            # delete a range of rows
+            with fitsio.FITS(fname,'rw') as fits:
+                fits['mytable'].delete_rows(slice(3,20))
+
+            # delete specific rows
+            with fitsio.FITS(fname,'rw') as fits:
+                rows2delete = [3,88,76]
+                fits['mytable'].delete_rows(rows2delete)
+        """
+
+        if rows is None:
+            return
+
+        # extract and convert to 1-offset for C routine
+        if isinstance(rows, slice):
+            rows = self._process_slice(rows)
+            if rows.step is not None and rows.step != 1:
+                rows = numpy.arange(
+                    rows.start+1,
+                    rows.stop+1,
+                    rows.step,
+                )
+            else:
+                # rows must be 1-offset
+                rows = slice(rows.start+1, rows.stop+1)
+        else:
+            rows = self._extract_rows(rows)
+            # rows must be 1-offset
+            rows += 1
+
+        if isinstance(rows, slice):
+            self._FITS.delete_row_range(self._ext+1, rows.start, rows.stop)
+        else:
+            if rows.size == 0:
+                return
+
+            self._FITS.delete_rows(self._ext+1, rows)
+
+        self._update_info()
+
+    def resize(self, nrows, front=False):
+        """
+        Resize the table to the given size, removing or adding rows as
+        necessary.  Note if expanding the table at the end, it is more
+        efficient to use the append function than resizing and then
+        writing.
+
+        Newly added rows are zeroed, except for 'i1', 'u2' and 'u4' data
+        types, which get -128, 32768 and 2147483648 respectively.
+
+        parameters
+        ----------
+        nrows: int
+            new size of table
+        front: bool, optional
+            If True, add or remove rows from the front.  Default
+            is False
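+
+        example
+        -------
+        a sketch growing the table to 1000 rows:
+
+            >>> hdu.resize(1000)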
+        """
+
+        nrows_current = self.get_nrows()
+        if nrows == nrows_current:
+            return
+
+        if nrows < nrows_current:
+            rowdiff = nrows_current - nrows
+            if front:
+                # delete from the front
+                start = 0
+                stop = rowdiff
+            else:
+                # delete from the back
+                start = nrows
+                stop = nrows_current
+
+            self.delete_rows(slice(start, stop))
+        else:
+            rowdiff = nrows - nrows_current
+            if front:
+                # in this case zero is what we want, since the code inserts
+                firstrow = 0
+            else:
+                firstrow = nrows_current
+            self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)
+
+        self._update_info()
+
+    def read(self, columns=None, rows=None, vstorage=None,
+             upper=False, lower=False, trim_strings=False, **keys):
+        """
+        Read data from this HDU
+
+        By default, all data are read. You can set the `columns` and/or
+        `rows` keywords to read subsets of the data.
+
+        Table data is read into a numpy recarray. To get a single column as
+        a numpy.ndarray, use the `read_column` method.
+
+        Slice notation is also supported for `TableHDU` types.
+
+            >>> fits = fitsio.FITS(filename)
+            >>> fits[ext][:]
+            >>> fits[ext][2:5]
+            >>> fits[ext][200:235:2]
+            >>> fits[ext][rows]
+            >>> fits[ext][cols][rows]
+
+        parameters
+        ----------
+        columns: optional
+            An optional set of columns to read from table HDUs. Default is to
+            read all. Can be string or number. If a sequence, a recarray
+            is always returned. If a scalar, an ordinary array is returned.
+        rows: optional
+            An optional list of rows to read from table HDUs.  Default is to
+            read all.
+        vstorage: string, optional
+            Override the default method to store variable length columns. Can
+            be 'fixed' or 'object'. See docs on fitsio.FITS for details.
+        lower: bool, optional
+            If True, force all column names to lower case in output. Will
+            override the lower= keyword from construction.
+        upper: bool, optional
+            If True, force all column names to upper case in output. Will
+            override the upper= keyword from construction.
+        trim_strings: bool, optional
+            If True, trim trailing spaces from strings. Will override the
+            trim_strings= keyword from the constructor.
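+
+        example
+        -------
+        a sketch; the column names are placeholders:
+
+            >>> data = hdu.read(columns=['x', 'y'], rows=[3, 8, 25])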
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if columns is not None:
+            data = self.read_columns(
+                columns, rows=rows, vstorage=vstorage,
+                upper=upper, lower=lower, trim_strings=trim_strings)
+        elif rows is not None:
+            # combinations of row and column subsets are covered by
+            # read_columns so we pass colnums=None here to get all columns
+            data = self.read_rows(
+                rows, vstorage=vstorage,
+                upper=upper, lower=lower, trim_strings=trim_strings)
+        else:
+            data = self._read_all(
+                vstorage=vstorage,
+                upper=upper, lower=lower, trim_strings=trim_strings)
+
+        return data
+
+    def _read_all(self, vstorage=None,
+                  upper=False, lower=False, trim_strings=False, colnums=None,
+                  **keys):
+        """
+        Read all data in the HDU.
+
+        parameters
+        ----------
+        vstorage: string, optional
+            Override the default method to store variable length columns.  Can
+            be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
+        lower: bool, optional
+            If True, force all column names to lower case in output. Will
+            override the lower= keyword from construction.
+        upper: bool, optional
+            If True, force all column names to upper case in output. Will
+            override the upper= keyword from construction.
+        trim_strings: bool, optional
+            If True, trim trailing spaces from strings. Will override the
+            trim_strings= keyword from the constructor.
+        colnums: integer array, optional
+            The column numbers, 0 offset
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        dtype, offsets, isvar = self.get_rec_dtype(
+            colnums=colnums, vstorage=vstorage)
+
+        w, = numpy.where(isvar == True)  # noqa
+        has_tbit = self._check_tbit()
+
+        if w.size > 0:
+            if vstorage is None:
+                _vstorage = self._vstorage
+            else:
+                _vstorage = vstorage
+            colnums = self._extract_colnums()
+            rows = None
+            array = self._read_rec_with_var(colnums, rows, dtype,
+                                            offsets, isvar, _vstorage)
+        elif has_tbit:
+            # drop down to read_columns since we can't stuff into a
+            # contiguous array
+            colnums = self._extract_colnums()
+            array = self.read_columns(
+                colnums,
+                rows=None, vstorage=vstorage,
+                upper=upper, lower=lower, trim_strings=trim_strings)
+        else:
+            nrows = self._info['nrows']
+            array = numpy.zeros(nrows, dtype=dtype)
+
+            self._FITS.read_as_rec(self._ext+1, 1, nrows, array)
+
+            array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+            for colnum, name in enumerate(array.dtype.names):
+                self._rescale_and_convert_field_inplace(
+                    array,
+                    name,
+                    self._info['colinfo'][colnum]['tscale'],
+                    self._info['colinfo'][colnum]['tzero'])
+
+        if self.lower or lower:
+            _names_to_lower_if_recarray(array)
+        elif self.upper or upper:
+            _names_to_upper_if_recarray(array)
+
+        self._maybe_trim_strings(array, trim_strings=trim_strings)
+        return array
+
+    def read_column(self, col, rows=None, vstorage=None,
+                    upper=False, lower=False, trim_strings=False, **keys):
+        """
+        Read the specified column
+
+        Alternatively, you can use slice notation
+
+            >>> fits=fitsio.FITS(filename)
+            >>> fits[ext][colname][:]
+            >>> fits[ext][colname][2:5]
+            >>> fits[ext][colname][200:235:2]
+            >>> fits[ext][colname][rows]
+
+        Note, if reading multiple columns, it is more efficient to use
+        read(columns=) or slice notation with a list of column names.
+
+        parameters
+        ----------
+        col: string/int, required
+            The column name or number.
+        rows: optional
+            An optional set of row numbers to read.
+        vstorage: string, optional
+            Override the default method to store variable length columns.  Can
+            be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
+        lower: bool, optional
+            If True, force all column names to lower case in output. Will
+            override the lower= keyword from construction.
+        upper: bool, optional
+            If True, force all column names to upper case in output. Will
+            override the upper= keyword from construction.
+        trim_strings: bool, optional
+            If True, trim trailing spaces from strings. Will override the
+            trim_strings= keyword from the constructor.
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        res = self.read_columns(
+            [col], rows=rows, vstorage=vstorage,
+            upper=upper, lower=lower, trim_strings=trim_strings)
+        colname = res.dtype.names[0]
+        data = res[colname]
+
+        self._maybe_trim_strings(data, trim_strings=trim_strings)
+        return data
+
+    def read_rows(self, rows, vstorage=None,
+                  upper=False, lower=False, trim_strings=False, **keys):
+        """
+        Read the specified rows.
+
+        parameters
+        ----------
+        rows: list,array
+            A list or array of row indices.
+        vstorage: string, optional
+            Override the default method to store variable length columns.  Can
+            be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
+        lower: bool, optional
+            If True, force all column names to lower case in output. Will
+            override the lower= keyword from construction.
+        upper: bool, optional
+            If True, force all column names to upper case in output. Will
+            override the upper= keyword from construction.
+        trim_strings: bool, optional
+            If True, trim trailing spaces from strings. Will override the
+            trim_strings= keyword from the constructor.
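+
+        example
+        -------
+        a sketch; `hdu` stands for an open table HDU:
+
+            >>> data = hdu.read_rows([3, 8, 25])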
+        """
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if rows is None:
+            # we actually want all rows!
+            return self._read_all()
+
+        if self._info['hdutype'] == ASCII_TBL:
+            return self.read(
+                rows=rows, vstorage=vstorage,
+                upper=upper, lower=lower, trim_strings=trim_strings)
+
+        rows = self._extract_rows(rows)
+        dtype, offsets, isvar = self.get_rec_dtype(vstorage=vstorage)
+
+        w, = numpy.where(isvar == True)  # noqa
+        if w.size > 0:
+            if vstorage is None:
+                _vstorage = self._vstorage
+            else:
+                _vstorage = vstorage
+            colnums = self._extract_colnums()
+            return self._read_rec_with_var(
+                colnums, rows, dtype, offsets, isvar, _vstorage)
+        else:
+            array = numpy.zeros(rows.size, dtype=dtype)
+            self._FITS.read_rows_as_rec(self._ext+1, array, rows)
+
+            array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+            for colnum, name in enumerate(array.dtype.names):
+                self._rescale_and_convert_field_inplace(
+                    array,
+                    name,
+                    self._info['colinfo'][colnum]['tscale'],
+                    self._info['colinfo'][colnum]['tzero'])
+
+        if self.lower or lower:
+            _names_to_lower_if_recarray(array)
+        elif self.upper or upper:
+            _names_to_upper_if_recarray(array)
+
+        self._maybe_trim_strings(array, trim_strings=trim_strings)
+
+        return array
+
+    def read_columns(self, columns, rows=None, vstorage=None,
+                     upper=False, lower=False, trim_strings=False, **keys):
+        """
+        read a subset of columns from this binary table HDU
+
+        By default, all rows are read.  Send rows= to select subsets of the
+        data.  Table data are read into a recarray for multiple columns,
+        plain array for a single column.
+
+        parameters
+        ----------
+        columns: list/array
+            An optional set of columns to read from table HDUs.  Can be string
+            or number. If a sequence, a recarray is always returned.  If a
+            scalar, an ordinary array is returned.
+        rows: list/array, optional
+            An optional list of rows to read from table HDUs.  Default is to
+            read all.
+        vstorage: string, optional
+            Override the default method to store variable length columns.  Can
+            be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
+        lower: bool, optional
+            If True, force all column names to lower case in output. Will
+            override the lower= keyword from construction.
+        upper: bool, optional
+            If True, force all column names to upper case in output. Will
+            override the upper= keyword from construction.
+        trim_strings: bool, optional
+            If True, trim trailing spaces from strings. Will override the
+            trim_strings= keyword from the constructor.
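+
+        example
+        -------
+        a sketch; 'x' and 'y' are placeholder column names:
+
+            >>> data = hdu.read_columns(['x', 'y'], rows=[0, 10])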
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if self._info['hdutype'] == ASCII_TBL:
+            return self.read(
+                columns=columns, rows=rows, vstorage=vstorage,
+                upper=upper, lower=lower, trim_strings=trim_strings)
+
+        # if columns is None, returns all.  Guaranteed to be unique and sorted
+        colnums = self._extract_colnums(columns)
+        if isinstance(colnums, int):
+            # scalar sent, don't read as a recarray
+            return self.read_column(
+                columns,
+                rows=rows, vstorage=vstorage,
+                upper=upper, lower=lower, trim_strings=trim_strings)
+
+        # if rows is None still returns None, and is correctly interpreted
+        # by the reader to mean all
+        rows = self._extract_rows(rows)
+
+        # this is the full dtype for all columns
+        dtype, offsets, isvar = self.get_rec_dtype(
+            colnums=colnums, vstorage=vstorage)
+
+        w, = numpy.where(isvar == True)  # noqa
+        if w.size > 0:
+            if vstorage is None:
+                _vstorage = self._vstorage
+            else:
+                _vstorage = vstorage
+            array = self._read_rec_with_var(
+                colnums, rows, dtype, offsets, isvar, _vstorage)
+        else:
+
+            if rows is None:
+                nrows = self._info['nrows']
+            else:
+                nrows = rows.size
+            array = numpy.zeros(nrows, dtype=dtype)
+
+            colnumsp = colnums[:].copy()
+            colnumsp[:] += 1
+            self._FITS.read_columns_as_rec(self._ext+1, colnumsp, array, rows)
+
+            array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+            for i in xrange(colnums.size):
+                colnum = int(colnums[i])
+                name = array.dtype.names[i]
+                self._rescale_and_convert_field_inplace(
+                    array,
+                    name,
+                    self._info['colinfo'][colnum]['tscale'],
+                    self._info['colinfo'][colnum]['tzero'])
+
+        if (self._check_tbit(colnums=colnums)):
+            array = self._fix_tbit_dtype(array, colnums)
+
+        if self.lower or lower:
+            _names_to_lower_if_recarray(array)
+        elif self.upper or upper:
+            _names_to_upper_if_recarray(array)
+
+        self._maybe_trim_strings(array, trim_strings=trim_strings)
+
+        return array
+
+    def read_slice(self, firstrow, lastrow, step=1,
+                   vstorage=None, lower=False, upper=False,
+                   trim_strings=False, **keys):
+        """
+        Read the specified row slice from a table.
+
+        Read all rows between firstrow and lastrow (non-inclusive, as per
+        python slice notation).  Note you must use slice notation for
+        images, e.g. f[ext][20:30, 40:50]
+
+        parameters
+        ----------
+        firstrow: integer
+            The first row to read
+        lastrow: integer
+            The last row to read, non-inclusive.  This follows the python list
+            slice convention that one does not include the last element.
+        step: integer, optional
+            Step between rows, default 1. e.g., if step is 2, skip every other
+            row.
+        vstorage: string, optional
+            Override the default method to store variable length columns.  Can
+            be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
+        lower: bool, optional
+            If True, force all column names to lower case in output. Will
+            override the lower= keyword from construction.
+        upper: bool, optional
+            If True, force all column names to upper case in output. Will
+            override the upper= keyword from construction.
+        trim_strings: bool, optional
+            If True, trim trailing spaces from strings. Will override the
+            trim_strings= keyword from the constructor.
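+
+        example
+        -------
+        a sketch reading rows 10 through 19, equivalent to the slice
+        notation hdu[10:20]:
+
+            >>> data = hdu.read_slice(10, 20)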
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if self._info['hdutype'] == ASCII_TBL:
+            rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
+            return self.read_ascii(
+                rows=rows, vstorage=vstorage,
+                upper=upper, lower=lower, trim_strings=trim_strings)
+
+        if self._info['hdutype'] == IMAGE_HDU:
+            raise ValueError("slices currently only supported for tables")
+
+        maxrow = self._info['nrows']
+        if firstrow < 0 or lastrow > maxrow:
+            raise ValueError(
+                "slice must specify a sub-range of [%d,%d]" % (0, maxrow))
+
+        dtype, offsets, isvar = self.get_rec_dtype(vstorage=vstorage)
+
+        w, = numpy.where(isvar == True)  # noqa
+        if w.size > 0:
+            if vstorage is None:
+                _vstorage = self._vstorage
+            else:
+                _vstorage = vstorage
+            rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
+            colnums = self._extract_colnums()
+            array = self._read_rec_with_var(
+                colnums, rows, dtype, offsets, isvar, _vstorage)
+        else:
+            if step != 1:
+                rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
+                array = self.read(rows=rows)
+            else:
+                # no +1 because lastrow is non-inclusive
+                nrows = lastrow - firstrow
+                array = numpy.zeros(nrows, dtype=dtype)
+
+                # only firstrow needs the +1, because the C code treats
+                # lastrow as inclusive
+                self._FITS.read_as_rec(self._ext+1, firstrow+1, lastrow, array)
+
+                array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
+                    array)
+
+                for colnum, name in enumerate(array.dtype.names):
+                    self._rescale_and_convert_field_inplace(
+                        array,
+                        name,
+                        self._info['colinfo'][colnum]['tscale'],
+                        self._info['colinfo'][colnum]['tzero'])
+
+        if self.lower or lower:
+            _names_to_lower_if_recarray(array)
+        elif self.upper or upper:
+            _names_to_upper_if_recarray(array)
+
+        self._maybe_trim_strings(array, trim_strings=trim_strings)
+
+        return array
+
+    def get_rec_dtype(self, colnums=None, vstorage=None, **keys):
+        """
+        Get the dtype for the specified columns
+
+        parameters
+        ----------
+        colnums: integer array, optional
+            The column numbers, 0 offset
+        vstorage: string, optional
+            See docs in read_columns
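+
+        example
+        -------
+        a sketch getting the record dtype for all columns:
+
+            >>> dtype, offsets, isvar = hdu.get_rec_dtype()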
+        """
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if vstorage is None:
+            _vstorage = self._vstorage
+        else:
+            _vstorage = vstorage
+
+        if colnums is None:
+            colnums = self._extract_colnums()
+
+        descr = []
+        isvararray = numpy.zeros(len(colnums), dtype=bool)
+        for i, colnum in enumerate(colnums):
+            dt, isvar = self.get_rec_column_descr(colnum, _vstorage)
+            descr.append(dt)
+            isvararray[i] = isvar
+        dtype = numpy.dtype(descr)
+
+        offsets = numpy.zeros(len(colnums), dtype='i8')
+        for i, n in enumerate(dtype.names):
+            offsets[i] = dtype.fields[n][1]
+        return dtype, offsets, isvararray
+
+    def _check_tbit(self, colnums=None, **keys):
+        """
+        Check if one of the columns is a TBIT column
+
+        parameters
+        ----------
+        colnums: integer array, optional
+        """
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if colnums is None:
+            colnums = self._extract_colnums()
+
+        has_tbit = False
+        for i, colnum in enumerate(colnums):
+            npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+            if (istbit):
+                has_tbit = True
+                break
+
+        return has_tbit
+
+    def _fix_tbit_dtype(self, array, colnums):
+        """
+        If necessary, patch up the TBIT to convert to bool array
+
+        parameters
+        ----------
+        array: record array
+        colnums: column numbers for lookup
+        """
+        descr = array.dtype.descr
+        for i, colnum in enumerate(colnums):
+            npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+            if (istbit):
+                coldescr = list(descr[i])
+                coldescr[1] = '?'
+                descr[i] = tuple(coldescr)
+
+        return array.view(descr)
+
+    def _get_simple_dtype_and_shape(self, colnum, rows=None):
+        """
+        When reading a single column, we want the basic data
+        type and the shape of the array.
+
+        for scalar columns, shape is just nrows, otherwise
+        it is (nrows, dim1, dim2)
+
+        Note if rows= is sent and only a single row is requested,
+        the shape will be (dim1,dim2)
+        """
+
+        # basic datatype
+        npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+        info = self._info['colinfo'][colnum]
+        name = info['name']
+
+        if rows is None:
+            nrows = self._info['nrows']
+        else:
+            nrows = rows.size
+
+        shape = None
+        tdim = info['tdim']
+
+        shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
+        if shape is not None:
+            if nrows > 1:
+                if not isinstance(shape, tuple):
+                    # vector
+                    shape = (nrows, shape)
+                else:
+                    # multi-dimensional
+                    shape = tuple([nrows] + list(shape))
+        else:
+            # scalar
+            shape = nrows
+        return npy_type, shape
+
+    def get_rec_column_descr(self, colnum, vstorage):
+        """
+        Get a descriptor entry for the specified column.
+
+        parameters
+        ----------
+        colnum: integer
+            The column number, 0 offset
+        vstorage: string
+            See docs in read_columns
+        """
+        npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+        name = self._info['colinfo'][colnum]['name']
+
+        if isvar:
+            if vstorage == 'object':
+                descr = (name, 'O')
+            else:
+                tform = self._info['colinfo'][colnum]['tform']
+                max_size = _extract_vararray_max(tform)
+
+                if max_size <= 0:
+                    note = 'Will read as an object field'
+                    if max_size < 0:
+                        mess = "Column '%s': No maximum size: '%s'. %s"
+                        mess = mess % (name, tform, note)
+                        warnings.warn(mess, FITSRuntimeWarning)
+                    else:
+                        mess = "Column '%s': Max size is zero: '%s'. %s"
+                        mess = mess % (name, tform, note)
+                        warnings.warn(mess, FITSRuntimeWarning)
+
+                    # we are forced to read this as an object array
+                    return self.get_rec_column_descr(colnum, 'object')
+
+                if npy_type[0] == 'S':
+                    # variable length string columns cannot
+                    # themselves be arrays, as far as we know
+                    npy_type = 'S%d' % max_size
+                    descr = (name, npy_type)
+                elif npy_type[0] == 'U':
+                    # variable length string columns cannot
+                    # themselves be arrays I don't think
+                    npy_type = 'U%d' % max_size
+                    descr = (name, npy_type)
+                else:
+                    descr = (name, npy_type, max_size)
+        else:
+            tdim = self._info['colinfo'][colnum]['tdim']
+            shape = _tdim2shape(
+                tdim, name,
+                is_string=(npy_type[0] == 'S' or npy_type[0] == 'U'))
+            if shape is not None:
+                descr = (name, npy_type, shape)
+            else:
+                descr = (name, npy_type)
+        return descr, isvar
+
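+    # As an illustration of get_rec_column_descr above: assuming a binary
+    # table with a hypothetical scalar f8 column 'x' (colnum 0) and a
+    # 3-vector f8 column 'y' (colnum 1), the (descr, isvar) results are
+    #
+    #     self.get_rec_column_descr(0, 'fixed')  # (('x', '>f8'), False)
+    #     self.get_rec_column_descr(1, 'fixed')  # (('y', '>f8', 3), False)
+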
+    def _read_rec_with_var(
+            self, colnums, rows, dtype, offsets, isvar, vstorage):
+        """
+        Read columns from a table into a rec array, including variable length
+        columns.  This is special because, for efficiency, it reads from the
+        main table as normal while skipping the variable length columns, then
+        reads the variable length columns separately, accounting
+        appropriately for strides.
+
+        row and column numbers should be checked before calling this function
+        """
+
+        colnumsp = colnums+1
+        if rows is None:
+            nrows = self._info['nrows']
+        else:
+            nrows = rows.size
+        array = numpy.zeros(nrows, dtype=dtype)
+
+        # read from the main table first
+        wnotvar, = numpy.where(isvar == False)  # noqa
+        if wnotvar.size > 0:
+            # this will be contiguous (not true for slices)
+            thesecol = colnumsp[wnotvar]
+            theseoff = offsets[wnotvar]
+            self._FITS.read_columns_as_rec_byoffset(self._ext+1,
+                                                    thesecol,
+                                                    theseoff,
+                                                    array,
+                                                    rows)
+            for i in xrange(thesecol.size):
+
+                name = array.dtype.names[wnotvar[i]]
+                colnum = thesecol[i]-1
+                self._rescale_and_convert_field_inplace(
+                    array,
+                    name,
+                    self._info['colinfo'][colnum]['tscale'],
+                    self._info['colinfo'][colnum]['tzero'])
+
+        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+        # now read the variable length arrays.  We may be able to speed this
+        # up by storing directly instead of reading first into a list
+        wvar, = numpy.where(isvar == True)  # noqa
+        if wvar.size > 0:
+            # this will be contiguous (not true for slices)
+            thesecol = colnumsp[wvar]
+            for i in xrange(thesecol.size):
+                colnump = thesecol[i]
+                name = array.dtype.names[wvar[i]]
+                dlist = self._FITS.read_var_column_as_list(
+                    self._ext+1, colnump, rows)
+
+                if (isinstance(dlist[0], str) or
+                        (IS_PY3 and isinstance(dlist[0], bytes))):
+                    is_string = True
+                else:
+                    is_string = False
+
+                if array[name].dtype.descr[0][1][1] == 'O':
+                    # storing in object array
+                    # get references to each, no copy made
+                    for irow, item in enumerate(dlist):
+                        if IS_PY3 and isinstance(item, bytes):
+                            item = item.decode('ascii')
+                        array[name][irow] = item
+                else:
+                    for irow, item in enumerate(dlist):
+                        if IS_PY3 and isinstance(item, bytes):
+                            item = item.decode('ascii')
+
+                        if is_string:
+                            array[name][irow] = item
+                        else:
+                            ncopy = len(item)
+
+                            if IS_PY3:
+                                ts = array[name].dtype.descr[0][1][1]
+                                if ts != 'S' and ts != 'U':
+                                    array[name][irow][0:ncopy] = item[:]
+                                else:
+                                    array[name][irow] = item
+                            else:
+                                array[name][irow][0:ncopy] = item[:]
+
+        return array
+
+    def _extract_rows(self, rows):
+        """
+        Extract an array of rows from an input scalar or sequence
+        """
+        if rows is not None:
+            rows = numpy.array(rows, ndmin=1, copy=False, dtype='i8')
+            # returns unique, sorted
+            rows = numpy.unique(rows)
+
+            maxrow = self._info['nrows']-1
+            if len(rows) > 0 and (rows[0] < 0 or rows[-1] > maxrow):
+                raise ValueError("rows must be in [%d,%d]" % (0, maxrow))
+        return rows
+
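+    # For example, with a 100-row table _extract_rows([7, 3, 3]) returns
+    # array([3, 7]): rows are made unique and sorted, and out-of-range
+    # values raise a ValueError.
+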
+    def _process_slice(self, arg):
+        """
+        process the input slice for use calling the C code
+        """
+        start = arg.start
+        stop = arg.stop
+        step = arg.step
+
+        nrows = self._info['nrows']
+        if step is None:
+            step = 1
+        if start is None:
+            start = 0
+        if stop is None:
+            stop = nrows
+
+        if start < 0:
+            start = nrows + start
+            if start < 0:
+                raise IndexError("Index out of bounds")
+
+        if stop < 0:
+            # negative stop is interpreted relative to the end,
+            # as for python slices
+            stop = nrows + stop
+
+        if stop < start:
+            # will return an empty struct
+            stop = start
+
+        if stop > nrows:
+            stop = nrows
+        return slice(start, stop, step)
+
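+    # As an illustration of _process_slice above, with nrows=100:
+    #
+    #     self._process_slice(slice(None, None))  # slice(0, 100, 1)
+    #     self._process_slice(slice(-10, None))   # slice(90, 100, 1)
+    #     self._process_slice(slice(None, -5))    # slice(0, 95, 1)
+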
+    def _slice2rows(self, start, stop, step=None):
+        """
+        Convert a slice to an explicit array of rows
+        """
+        nrows = self._info['nrows']
+        if start is None:
+            start = 0
+        if stop is None:
+            stop = nrows
+        if step is None:
+            step = 1
+
+        tstart = self._fix_range(start)
+        tstop = self._fix_range(stop)
+        if tstart == 0 and tstop == nrows and step == 1:
+            # this is faster: if all fields are also requested, then a
+            # single fread will be done
+            return None
+        if stop < start:
+            raise ValueError("start is greater than stop in slice")
+        return numpy.arange(tstart, tstop, step, dtype='i8')
+
+    def _fix_range(self, num, isslice=True):
+        """
+        Ensure the input is within range.
+
+        If isslice=True, treat the input as a slice endpoint, where the
+        end is inclusive
+        """
+
+        nrows = self._info['nrows']
+        if isslice:
+            # include the end
+            if num < 0:
+                num = nrows + (1+num)
+            elif num > nrows:
+                num = nrows
+        else:
+            # single element
+            if num < 0:
+                num = nrows + num
+            elif num > (nrows-1):
+                num = nrows-1
+
+        return num
+
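+    # As an illustration of _fix_range above, with nrows=10:
+    #
+    #     self._fix_range(-1)                 # 10: inclusive slice end
+    #     self._fix_range(-1, isslice=False)  # 9: last single element
+    #     self._fix_range(25)                 # 10: clamped to nrows
+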
+    def _rescale_and_convert_field_inplace(self, array, name, scale, zero):
+        """
+        Apply fits scalings.  Also, convert bool to proper
+        numpy boolean values
+        """
+        self._rescale_array(array[name], scale, zero)
+        if array[name].dtype == bool:
+            array[name] = self._convert_bool_array(array[name])
+        return array
+
+    def _rescale_and_convert(self, array, scale, zero, name=None):
+        """
+        Apply fits scalings.  Also, convert bool to proper
+        numpy boolean values
+        """
+        self._rescale_array(array, scale, zero)
+        if array.dtype == bool:
+            array = self._convert_bool_array(array)
+
+        return array
+
+    def _rescale_array(self, array, scale, zero):
+        """
+        Scale the input array
+        """
+        if scale != 1.0:
+            sval = numpy.array(scale, dtype=array.dtype)
+            array *= sval
+        if zero != 0.0:
+            zval = numpy.array(zero, dtype=array.dtype)
+            array += zval
+
+    def _maybe_trim_strings(self, array, trim_strings=False, **keys):
+        """
+        if requested, trim trailing white space from
+        all string fields in the input array
+        """
+        if keys:
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if self.trim_strings or trim_strings:
+            _trim_strings(array)
+
+    def _maybe_decode_fits_ascii_strings_to_unicode_py3(self, array):
+        if IS_PY3:
+            do_conversion = False
+            new_dt = []
+            for _dt in array.dtype.descr:
+                if 'S' in _dt[1]:
+                    do_conversion = True
+                    if len(_dt) == 3:
+                        new_dt.append((
+                            _dt[0],
+                            _dt[1].replace('S', 'U').replace('|', ''),
+                            _dt[2]))
+                    else:
+                        new_dt.append((
+                            _dt[0],
+                            _dt[1].replace('S', 'U').replace('|', '')))
+                else:
+                    new_dt.append(_dt)
+            if do_conversion:
+                array = array.astype(new_dt, copy=False)
+        return array
+
+    def _convert_bool_array(self, array):
+        """
+        cfitsio reads logical columns as the characters 'T' and 'F';
+        convert these to a proper numpy boolean array
+        """
+
+        output = (array.view(numpy.int8) == ord('T')).astype(bool)
+        return output
+
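+    # For example, a logical column read as the bytes 'T', 'F', 'T'
+    # becomes a proper boolean array:
+    #
+    #     raw = numpy.array([b'T', b'F', b'T'], dtype='S1')
+    #     self._convert_bool_array(raw)  # array([ True, False,  True])
+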
+    def _get_tbl_numpy_dtype(self, colnum, include_endianness=True):
+        """
+        Get numpy type for the input column
+        """
+        table_type = self._info['hdutype']
+        table_type_string = _hdu_type_map[table_type]
+        try:
+            ftype = self._info['colinfo'][colnum]['eqtype']
+            if table_type == ASCII_TBL:
+                npy_type = _table_fits2npy_ascii[abs(ftype)]
+            else:
+                npy_type = _table_fits2npy[abs(ftype)]
+        except KeyError:
+            raise KeyError("unsupported %s fits data "
+                           "type: %d" % (table_type_string, ftype))
+
+        istbit = False
+        if ftype == 1:
+            istbit = True
+
+        isvar = False
+        if ftype < 0:
+            isvar = True
+        if include_endianness:
+            # if binary we will read the big endian bytes directly,
+            # if ascii we read into native byte order
+            if table_type == ASCII_TBL:
+                addstr = ''
+            else:
+                addstr = '>'
+            if npy_type not in ['u1', 'i1', 'S', 'U']:
+                npy_type = addstr+npy_type
+
+        if npy_type == 'S':
+            width = self._info['colinfo'][colnum]['width']
+            npy_type = 'S%d' % width
+        elif npy_type == 'U':
+            width = self._info['colinfo'][colnum]['width']
+            npy_type = 'U%d' % width
+
+        return npy_type, isvar, istbit
+
+    def _process_args_as_rows_or_columns(self, arg, unpack=False):
+        """
+        We must be able to interpret the args as either a column name or
+        row number, or sequences thereof.  Numpy arrays and slices are also
+        fine.
+
+        Examples:
+            'field'
+            35
+            [35,55,86]
+            ['f1','f2',...]
+        Can also be tuples or arrays.
+        """
+
+        flags = set()
+
+        if isinstance(arg, (tuple, list, numpy.ndarray)):
+            # a sequence was entered; strings are column names,
+            # otherwise the sequence is interpreted as row numbers
+            result = arg
+            if not isstring(arg[0]):
+                flags.add('isrows')
+        elif isstring(arg):
+            # a single string was entered
+            result = arg
+        elif isinstance(arg, slice):
+            if unpack:
+                flags.add('isrows')
+                result = self._slice2rows(arg.start, arg.stop, arg.step)
+            else:
+                flags.add('isrows')
+                flags.add('isslice')
+                result = self._process_slice(arg)
+        else:
+            # a single object was entered.
+            # Probably should apply some more checking on this
+            result = arg
+            flags.add('isrows')
+            if numpy.ndim(arg) == 0:
+                flags.add('isscalar')
+
+        return result, flags
+
+    def _read_var_column(self, colnum, rows, vstorage):
+        """
+
+        first read as a list of arrays, then copy into either a fixed length
+        array or an array of objects, depending on vstorage.
+
+        """
+
+        if IS_PY3:
+            stype = bytes
+        else:
+            stype = str
+
+        dlist = self._FITS.read_var_column_as_list(self._ext+1, colnum+1, rows)
+
+        if vstorage == 'fixed':
+            tform = self._info['colinfo'][colnum]['tform']
+            max_size = _extract_vararray_max(tform)
+
+            if max_size <= 0:
+                name = self._info['colinfo'][colnum]['name']
+                note = 'Will read as an object field'
+                if max_size < 0:
+                    mess = "Column '%s': No maximum size: '%s'. %s"
+                else:
+                    mess = "Column '%s': Max size is zero: '%s'. %s"
+                mess = mess % (name, tform, note)
+                warnings.warn(mess, FITSRuntimeWarning)
+
+                # we are forced to read this as an object array
+                return self._read_var_column(colnum, rows, 'object')
+
+            if isinstance(dlist[0], stype):
+                descr = 'S%d' % max_size
+                array = numpy.fromiter(dlist, descr)
+                if IS_PY3:
+                    array = array.astype('U', copy=False)
+            else:
+                descr = dlist[0].dtype.str
+                array = numpy.zeros((len(dlist), max_size), dtype=descr)
+
+                for irow, item in enumerate(dlist):
+                    ncopy = len(item)
+                    array[irow, 0:ncopy] = item[:]
+        else:
+            array = numpy.zeros(len(dlist), dtype='O')
+            for irow, item in enumerate(dlist):
+                if IS_PY3 and isinstance(item, bytes):
+                    item = item.decode('ascii')
+                array[irow] = item
+
+        return array
+
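+    # For instance, assuming a variable-length integer column with TFORM
+    # '1PJ(3)' holding the ragged rows [1, 2] and [3, 4, 5]:
+    #
+    #   - vstorage='fixed' gives a (2, 3) array, zero padded:
+    #         [[1, 2, 0], [3, 4, 5]]
+    #   - vstorage='object' gives an object array holding the two
+    #     variable-length rows unchanged
+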
+    def _extract_colnums(self, columns=None):
+        """
+        Extract an array of columns from the input
+        """
+        if columns is None:
+            return numpy.arange(self._ncol, dtype='i8')
+
+        if not isinstance(columns, (tuple, list, numpy.ndarray)):
+            # is a scalar
+            return self._extract_colnum(columns)
+
+        colnums = numpy.zeros(len(columns), dtype='i8')
+        for i in xrange(colnums.size):
+            colnums[i] = self._extract_colnum(columns[i])
+
+        # returns unique sorted
+        colnums = numpy.unique(colnums)
+        return colnums
+
+    def _extract_colnum(self, col):
+        """
+        Get the column number for the input column
+        """
+        if isinteger(col):
+            colnum = col
+
+            if (colnum < 0) or (colnum > (self._ncol-1)):
+                raise ValueError(
+                    "column number should be in [0,%d]" % (self._ncol-1))
+        else:
+            colstr = mks(col)
+            try:
+                if self.case_sensitive:
+                    mess = "column name '%s' not found (case sensitive)" % col
+                    colnum = self._colnames.index(colstr)
+                else:
+                    mess = ("column name '%s' not found "
+                            "(case insensitive)" % col)
+                    colnum = self._colnames_lower.index(colstr.lower())
+            except ValueError:
+                raise ValueError(mess)
+        return int(colnum)
+
+    def _update_info(self):
+        """
+        Call parent method and make sure this is in fact a
+        table HDU.  Set some convenience data.
+        """
+        super(TableHDU, self)._update_info()
+        if self._info['hdutype'] == IMAGE_HDU:
+            mess = "Extension %s is not a Table HDU" % self.ext
+            raise ValueError(mess)
+        if 'colinfo' in self._info:
+            self._colnames = [i['name'] for i in self._info['colinfo']]
+            self._colnames_lower = [
+                i['name'].lower() for i in self._info['colinfo']]
+            self._ncol = len(self._colnames)
+
+    def __getitem__(self, arg):
+        """
+        Get data from a table using python [] notation.
+
+        You can use [] to extract column and row subsets, or read everything.
+        The notation is essentially the same as numpy [] notation, except that
+        a sequence of column names may also be given.  Examples reading from
+        "filename", extension "ext"
+
+            fits=fitsio.FITS(filename)
+            fits[ext][:]
+            fits[ext][2]   # returns a scalar
+            fits[ext][2:5]
+            fits[ext][200:235:2]
+            fits[ext][rows]
+            fits[ext][cols][rows]
+
+        Note data are only read once the rows are specified.
+
+        Note that with this function variable length arrays are always read
+        the default way, so set vstorage as desired on construction.
+
+        This function is used for ascii tables as well
+        """
+
+        res, flags = self._process_args_as_rows_or_columns(arg)
+
+        if 'isrows' in flags:
+            # rows were entered: read all columns
+            if 'isslice' in flags:
+                array = self.read_slice(res.start, res.stop, res.step)
+            else:
+                # will also get here if slice is entered but this
+                # is an ascii table
+                array = self.read(rows=res)
+        else:
+            return TableColumnSubset(self, res)
+
+        if self.lower:
+            _names_to_lower_if_recarray(array)
+        elif self.upper:
+            _names_to_upper_if_recarray(array)
+
+        self._maybe_trim_strings(array)
+
+        if 'isscalar' in flags:
+            assert array.shape[0] == 1
+            array = array[0]
+        return array
+
+    def __iter__(self):
+        """
+        Get an iterator for a table
+
+        e.g.
+        f=fitsio.FITS(fname)
+        hdu1 = f[1]
+        for row in hdu1:
+            ...
+        """
+
+        # always start with first row
+        self._iter_row = 0
+
+        # for iterating we must assume the number of rows will not change
+        self._iter_nrows = self.get_nrows()
+
+        self._buffer_iter_rows(0)
+        return self
+
+    def next(self):
+        """
+        get the next row when iterating
+
+        e.g.
+        f=fitsio.FITS(fname)
+        hdu1 = f[1]
+        for row in hdu1:
+            ...
+
+        By default one row is read at a time.  Send iter_row_buffer on
+        construction for more efficient buffering.
+        """
+        return self._get_next_buffered_row()
+
+    __next__ = next
+
+    def _get_next_buffered_row(self):
+        """
+        Get the next row for iteration.
+        """
+        if self._iter_row == self._iter_nrows:
+            raise StopIteration
+
+        if self._row_buffer_index >= self._iter_row_buffer:
+            self._buffer_iter_rows(self._iter_row)
+
+        data = self._row_buffer[self._row_buffer_index]
+        self._iter_row += 1
+        self._row_buffer_index += 1
+        return data
+
+    def _buffer_iter_rows(self, start):
+        """
+        Read in the buffer for iteration
+        """
+        self._row_buffer = self[start:start+self._iter_row_buffer]
+
+        # start back at the front of the buffer
+        self._row_buffer_index = 0
+
+    def __repr__(self):
+        """
+        textual representation for some metadata
+        """
+        text, spacing = self._get_repr_list()
+
+        text.append('%srows: %d' % (spacing, self._info['nrows']))
+        text.append('%scolumn info:' % spacing)
+
+        cspacing = ' '*4
+        nspace = 4
+        nname = 15
+        ntype = 6
+        format = cspacing + "%-" + str(nname) + "s %" + str(ntype) + "s  %s"
+        pformat = (
+            cspacing + "%-" +
+            str(nname) + "s\n %" +
+            str(nspace+nname+ntype) + "s  %s")
+
+        for colnum, c in enumerate(self._info['colinfo']):
+            if len(c['name']) > nname:
+                f = pformat
+            else:
+                f = format
+
+            dt, isvar, istbit = self._get_tbl_numpy_dtype(
+                colnum, include_endianness=False)
+            if isvar:
+                tform = self._info['colinfo'][colnum]['tform']
+                if dt[0] == 'S':
+                    dt = 'S0'
+                    dimstr = 'vstring[%d]' % _extract_vararray_max(tform)
+                else:
+                    dimstr = 'varray[%s]' % _extract_vararray_max(tform)
+            else:
+                if dt[0] == 'S':
+                    is_string = True
+                else:
+                    is_string = False
+                dimstr = _get_col_dimstr(c['tdim'], is_string=is_string)
+
+            s = f % (c['name'], dt, dimstr)
+            text.append(s)
+
+        text = '\n'.join(text)
+        return text
+
+
+class AsciiTableHDU(TableHDU):
+    def read(self, rows=None, columns=None, vstorage=None,
+             upper=False, lower=False, trim_strings=False, **keys):
+        """
+        read data from an ascii table HDU
+
+        By default, all rows are read.  Send rows= to select subsets of the
+        data.  Table data are read into a recarray for multiple columns,
+        plain array for a single column.
+
+        parameters
+        ----------
+        columns: list/array
+            An optional set of columns to read from table HDUs.  Can be string
+            or number. If a sequence, a recarray is always returned.  If a
+            scalar, an ordinary array is returned.
+        rows: list/array, optional
+            An optional list of rows to read from table HDUS.  Default is to
+            read all.
+        vstorage: string, optional
+            Over-ride the default method used to store variable length
+            columns.  Can be 'fixed' or 'object'.  See docs on fitsio.FITS
+            for details.
+        lower: bool, optional
+            If True, force all column names to lower case in output.
+            Over-rides the lower= keyword from construction.
+        upper: bool, optional
+            If True, force all column names to upper case in output.
+            Over-rides the upper= keyword from construction.
+        trim_strings: bool, optional
+            If True, trim trailing spaces from strings.  Over-rides the
+            trim_strings= keyword from construction.
+        """
+        if keys:
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        # if columns is None, returns all.  Guaranteed to be unique and sorted
+        colnums = self._extract_colnums(columns)
+        if isinstance(colnums, int):
+            # scalar sent, don't read as a recarray
+            return self.read_column(
+                columns, rows=rows, vstorage=vstorage,
+                upper=upper, lower=lower, trim_strings=trim_strings)
+
+        # if rows is None, _extract_rows returns None, which the reader
+        # correctly interprets to mean all rows
+        rows = self._extract_rows(rows)
+        if rows is None:
+            nrows = self._info['nrows']
+        else:
+            nrows = rows.size
+
+        # this is the full dtype for all columns
+        dtype, offsets, isvar = self.get_rec_dtype(
+            colnums=colnums, vstorage=vstorage)
+        array = numpy.zeros(nrows, dtype=dtype)
+
+        # note reading into existing data
+        wnotvar, = numpy.where(isvar == False)  # noqa
+        if wnotvar.size > 0:
+            for i in wnotvar:
+                colnum = colnums[i]
+                name = array.dtype.names[i]
+                a = array[name].copy()
+                self._FITS.read_column(self._ext+1, colnum+1, a, rows)
+                array[name] = a
+                del a
+
+        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+        wvar, = numpy.where(isvar == True)  # noqa
+        if wvar.size > 0:
+            for i in wvar:
+                colnum = colnums[i]
+                name = array.dtype.names[i]
+                dlist = self._FITS.read_var_column_as_list(
+                    self._ext+1, colnum+1, rows)
+                if (isinstance(dlist[0], str) or
+                        (IS_PY3 and isinstance(dlist[0], bytes))):
+                    is_string = True
+                else:
+                    is_string = False
+
+                if array[name].dtype.descr[0][1][1] == 'O':
+                    # storing in object array
+                    # get references to each, no copy made
+                    for irow, item in enumerate(dlist):
+                        if IS_PY3 and isinstance(item, bytes):
+                            item = item.decode('ascii')
+                        array[name][irow] = item
+                else:
+                    for irow, item in enumerate(dlist):
+                        if IS_PY3 and isinstance(item, bytes):
+                            item = item.decode('ascii')
+                        if is_string:
+                            array[name][irow] = item
+                        else:
+                            ncopy = len(item)
+                            array[name][irow][0:ncopy] = item[:]
+
+        if self.lower or lower:
+            _names_to_lower_if_recarray(array)
+        elif self.upper or upper:
+            _names_to_upper_if_recarray(array)
+
+        self._maybe_trim_strings(array, trim_strings=trim_strings)
+
+        return array
+
+    read_ascii = read
+
+
+class TableColumnSubset(object):
+    """
+
+    A class representing a subset of the columns on disk.  When called
+    with .read() or [ rows ] the data are read from disk.
+
+    Useful because subsets can be passed around to functions, or chained
+    with a row selection.
+
+    This class is returned when using [ ] notation to specify fields in a
+    TableHDU class
+
+        fits = fitsio.FITS(fname)
+        colsub = fits[ext][field_list]
+
+    returns a TableColumnSubset object.  To read rows:
+
+        data = fits[ext][field_list][row_list]
+
+        colsub = fits[ext][field_list]
+        data = colsub[row_list]
+        data = colsub.read(rows=row_list)
+
+    to read all, use .read() with no args or [:]
+    """
+
+    def __init__(self, fitshdu, columns):
+        """
+        Input is the FITS instance and a list of column names.
+        """
+
+        self.columns = columns
+        if isstring(columns) or isinteger(columns):
+            # this is to check if it exists
+            self.colnums = [fitshdu._extract_colnum(columns)]
+
+            self.is_scalar = True
+            self.columns_list = [columns]
+        else:
+            # this is to check if it exists
+            self.colnums = fitshdu._extract_colnums(columns)
+
+            self.is_scalar = False
+            self.columns_list = columns
+
+        self.fitshdu = fitshdu
+
+    def read(self, columns=None, rows=None, vstorage=None, lower=False,
+             upper=False, trim_strings=False, **keys):
+        """
+        Read the data from disk and return as a numpy array
+
+        parameters
+        ----------
+        columns: list/array, optional
+            An optional set of columns to read from table HDUs.  Can be string
+            or number. If a sequence, a recarray is always returned.  If a
+            scalar, an ordinary array is returned.
+        rows: optional
+            An optional list of rows to read from table HDUS.  Default is to
+            read all.
+        vstorage: string, optional
+            Over-ride the default method used to store variable length
+            columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS
+            for details.
+        lower: bool, optional
+            If True, force all column names to lower case in output.
+            Over-rides the lower= keyword from construction.
+        upper: bool, optional
+            If True, force all column names to upper case in output.
+            Over-rides the upper= keyword from construction.
+        trim_strings: bool, optional
+            If True, trim trailing spaces from strings. Over-rides the
+            trim_strings= keyword from construction.
+        """
+        if keys:
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if self.is_scalar:
+            data = self.fitshdu.read_column(
+                self.columns,
+                rows=rows, vstorage=vstorage, lower=lower, upper=upper,
+                trim_strings=trim_strings)
+        else:
+            if columns is None:
+                c = self.columns
+            else:
+                c = columns
+            data = self.fitshdu.read(
+                columns=c,
+                rows=rows, vstorage=vstorage, lower=lower, upper=upper,
+                trim_strings=trim_strings)
+
+        return data
+
+    def __getitem__(self, arg):
+        """
+        If columns are sent, then the columns will just get reset and
+        we'll return a new object
+
+        If rows are sent, they are read and the result returned.
+        """
+
+        # we have to unpack the rows if we are reading a subset
+        # of the columns because our slice operator only works
+        # on whole rows.  We could allow rows= keyword to
+        # be a slice...
+
+        res, flags = \
+            self.fitshdu._process_args_as_rows_or_columns(arg, unpack=True)
+        if 'isrows' in flags:
+            # rows was entered: read all current column subset
+            array = self.read(rows=res)
+            if 'isscalar' in flags:
+                assert array.shape[0] == 1
+                array = array[0]
+            return array
+        else:
+            # columns was entered.  Return a subset object
+            return TableColumnSubset(self.fitshdu, columns=res)
+
+    def __repr__(self):
+        """
+        Representation for TableColumnSubset
+        """
+        spacing = ' '*2
+        cspacing = ' '*4
+
+        hdu = self.fitshdu
+        info = self.fitshdu._info
+        colinfo = info['colinfo']
+
+        text = []
+        text.append("%sfile: %s" % (spacing, hdu._filename))
+        text.append("%sextension: %d" % (spacing, info['hdunum']-1))
+        text.append("%stype: %s" % (spacing, _hdu_type_map[info['hdutype']]))
+        text.append('%srows: %d' % (spacing, info['nrows']))
+        text.append("%scolumn subset:" % spacing)
+
+        cspacing = ' '*4
+        nspace = 4
+        nname = 15
+        ntype = 6
+        format = cspacing + "%-" + str(nname) + "s %" + str(ntype) + "s  %s"
+        pformat = (
+            cspacing + "%-" + str(nname) + "s\n %" +
+            str(nspace+nname+ntype) + "s  %s")
+
+        for colnum in self.colnums:
+            cinfo = colinfo[colnum]
+
+            if len(cinfo['name']) > nname:
+                f = pformat
+            else:
+                f = format
+
+            dt, isvar, istbit = hdu._get_tbl_numpy_dtype(
+                colnum, include_endianness=False)
+            if isvar:
+                tform = cinfo['tform']
+                if dt[0] == 'S':
+                    dt = 'S0'
+                    dimstr = 'vstring[%d]' % _extract_vararray_max(tform)
+                else:
+                    dimstr = 'varray[%s]' % _extract_vararray_max(tform)
+            else:
+                dimstr = _get_col_dimstr(cinfo['tdim'])
+
+            s = f % (cinfo['name'], dt, dimstr)
+            text.append(s)
+
+        s = "\n".join(text)
+        return s
+
+
+def _tdim2shape(tdim, name, is_string=False):
+    shape = None
+    if tdim is None:
+        raise ValueError("field '%s' has malformed TDIM" % name)
+
+    if len(tdim) > 1 or tdim[0] > 1:
+        if is_string:
+            shape = list(reversed(tdim[1:]))
+        else:
+            shape = list(reversed(tdim))
+
+        if len(shape) == 1:
+            shape = shape[0]
+        else:
+            shape = tuple(shape)
+
+    return shape
+
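+# As an illustration, _tdim2shape reverses the FITS TDIM ordering into
+# numpy's row-major convention; for strings the leading dimension is the
+# string width and is dropped:
+#
+#     _tdim2shape([1], 'c')                        # None (scalar column)
+#     _tdim2shape([5], 'c')                        # 5 (vector column)
+#     _tdim2shape([3, 4], 'c')                     # (4, 3)
+#     _tdim2shape([8, 3, 4], 'c', is_string=True)  # (4, 3)
+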
+
+def _names_to_lower_if_recarray(data):
+    if data.dtype.names is not None:
+        data.dtype.names = [n.lower() for n in data.dtype.names]
+
+
+def _names_to_upper_if_recarray(data):
+    if data.dtype.names is not None:
+        data.dtype.names = [n.upper() for n in data.dtype.names]
+
+
+def _trim_strings(data):
+    names = data.dtype.names
+    if names is not None:
+        # run through each field separately
+        for n in names:
+            if data[n].dtype.descr[0][1][1] in ['S', 'U']:
+                data[n] = numpy.char.rstrip(data[n])
+    else:
+        if data.dtype.descr[0][1][1] in ['S', 'U']:
+            data[:] = numpy.char.rstrip(data[:])
+
+
+def _extract_vararray_max(tform):
+    """
+    Extract number from PX(number)
+    """
+    first = tform.find('(')
+    last = tform.rfind(')')
+
+    if first == -1 or last == -1:
+        # no max length specified
+        return -1
+
+    maxnum = int(tform[first+1:last])
+    return maxnum
+
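+# For example:
+#
+#     _extract_vararray_max('1PJ(35)')  # 35
+#     _extract_vararray_max('1PJ')      # -1, no maximum specified
+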
+
+def _get_col_dimstr(tdim, is_string=False):
+    """
+    not for variable length
+    """
+    dimstr = ''
+    if tdim is None:
+        dimstr = 'array[bad TDIM]'
+    else:
+        if is_string:
+            if len(tdim) > 1:
+                dimstr = [str(d) for d in tdim[1:]]
+        else:
+            if len(tdim) > 1 or tdim[0] > 1:
+                dimstr = [str(d) for d in tdim]
+        if dimstr != '':
+            dimstr = ','.join(dimstr)
+            dimstr = 'array[%s]' % dimstr
+
+    return dimstr
+
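+# For example:
+#
+#     _get_col_dimstr([1])                     # ''
+#     _get_col_dimstr([3, 4])                  # 'array[3,4]'
+#     _get_col_dimstr([8, 3], is_string=True)  # 'array[3]'
+#     _get_col_dimstr(None)                    # 'array[bad TDIM]'
+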
+
+# all strings are read as bytes for python3 and then decoded to unicode
+_table_fits2npy = {1: 'i1',
+                   11: 'u1',
+                   12: 'i1',
+                   # logical. Note pyfits uses this for i1,
+                   # cfitsio casts to char*
+                   14: 'b1',
+                   16: 'S',
+                   20: 'u2',
+                   21: 'i2',
+                   30: 'u4',  # 30=TUINT
+                   31: 'i4',  # 31=TINT
+                   40: 'u4',  # 40=TULONG
+                   41: 'i4',  # 41=TLONG
+                   42: 'f4',
+                   81: 'i8',
+                   82: 'f8',
+                   83: 'c8',   # TCOMPLEX
+                   163: 'c16'}  # TDBLCOMPLEX
+
+# cfitsio returns only types f8, i4 and strings for column types. in order to
+# avoid data loss, we always use i8 for integer types
+# all strings are read as bytes for python3 and then decoded to unicode
+_table_fits2npy_ascii = {16: 'S',
+                         31: 'i8',  # listed as TINT, reading as i8
+                         41: 'i8',  # listed as TLONG, reading as i8
+                         81: 'i8',
+                         21: 'i4',  # listed as TSHORT, reading as i4
+                         42: 'f8',  # listed as TFLOAT, reading as f8
+                         82: 'f8'}
+
+# for TFORM
+_table_npy2fits_form = {'b1': 'L',
+                        'u1': 'B',
+                        'i1': 'S',  # gets converted to unsigned
+                        'S': 'A',
+                        'U': 'A',
+                        'u2': 'U',  # gets converted to signed
+                        'i2': 'I',
+                        'u4': 'V',  # gets converted to signed
+                        'i4': 'J',
+                        'i8': 'K',
+                        'f4': 'E',
+                        'f8': 'D',
+                        'c8': 'C',
+                        'c16': 'M'}
+
+# from mrdfits; note G gets turned into E
+# types=  ['A',   'I',   'L',   'B',   'F',    'D',      'C',     'M',     'K']
+# formats=['A1',  'I6',  'I10', 'I4',  'G15.9','G23.17', 'G15.9', 'G23.17',
+#          'I20']
+
+_table_npy2fits_form_ascii = {'S': 'A1',       # Need to add max here
+                              'U': 'A1',       # Need to add max here
+                              'i2': 'I7',      # I
+                              'i4': 'I12',     # ??
+                              # 'i8': 'I21',   # K; i8 isn't supported
+                              # 'f4': 'E15.7', # F
+                              # we must write f4 as f8 since we can only
+                              # read as f8
+                              'f4': 'E26.17',
+                              # D; 25.16 looks right, but this is recommended
+                              'f8': 'E26.17'}
+
+
+def _npy2fits(d, table_type='binary', write_bitcols=False):
+    """
+    d is the full element from the descr
+    """
+    npy_dtype = d[1][1:]
+    if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
+        name, form, dim = _npy_string2fits(d, table_type=table_type)
+    else:
+        name, form, dim = _npy_num2fits(
+            d, table_type=table_type, write_bitcols=write_bitcols)
+
+    return name, form, dim
+
+
+def _npy_num2fits(d, table_type='binary', write_bitcols=False):
+    """
+    d is the full element from the descr
+
+    For vector,array columns the form is the total counts
+    followed by the code.
+
+    For array columns with dimension greater than 1, the dim is set to
+        (dim1, dim2, ...)
+    So it is treated like an extra dimension
+
+    """
+
+    dim = None
+
+    name = d[0]
+
+    npy_dtype = d[1][1:]
+    if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
+        raise ValueError("got S or U type: use _npy_string2fits")
+
+    if npy_dtype not in _table_npy2fits_form:
+        raise ValueError("unsupported type '%s'" % npy_dtype)
+
+    if table_type == 'binary':
+        form = _table_npy2fits_form[npy_dtype]
+    else:
+        form = _table_npy2fits_form_ascii[npy_dtype]
+
+    # now the dimensions
+    if len(d) > 2:
+        if table_type == 'ascii':
+            raise ValueError(
+                "Ascii table columns must be scalar, got %s" % str(d))
+
+        if write_bitcols and npy_dtype == 'b1':
+            # multi-dimensional boolean
+            form = 'X'
+
+        # Note, depending on numpy version, even 1-d can be a tuple
+        if isinstance(d[2], tuple):
+            count = reduce(lambda x, y: x*y, d[2])
+            form = '%d%s' % (count, form)
+
+            if len(d[2]) > 1:
+                # this is a multi-dimensional array column.  the form
+                # is the total element count followed by the type code
+                dim = list(reversed(d[2]))
+                dim = [str(e) for e in dim]
+                dim = '(' + ','.join(dim)+')'
+        else:
+            # this is a vector (1d array) column
+            count = d[2]
+            form = '%d%s' % (count, form)
+
+    return name, form, dim
+
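+# As an illustration of _npy_num2fits above:
+#
+#     _npy_num2fits(('x', '<f8'))          # ('x', 'D', None)
+#     _npy_num2fits(('x', '<i4', 3))       # ('x', '3J', None)
+#     _npy_num2fits(('x', '<i4', (3, 4)))  # ('x', '12J', '(4,3)')
+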
+
+def _npy_string2fits(d, table_type='binary'):
+    """
+    d is the full element from the descr
+
+    form for strings is the total number of bytes followed by A.  Thus
+    for vector or array columns it is the size of the string times the
+    total number of elements in the array.
+
+    Then the dim is set to
+        (sizeofeachstring, dim1, dim2, ...)
+    So it is treated like an extra dimension
+
+    """
+
+    dim = None
+
+    name = d[0]
+
+    npy_dtype = d[1][1:]
+    if npy_dtype[0] != 'S' and npy_dtype[0] != 'U':
+        raise ValueError("expected S or U type, got %s" % npy_dtype[0])
+
+    # get the size of each string
+    string_size_str = npy_dtype[1:]
+    string_size = int(string_size_str)
+
+    if string_size <= 0:
+        raise ValueError('string sizes must be > 0, '
+                         'got %s for field %s' % (npy_dtype, name))
+
+    # now the dimensions
+    if len(d) == 2:
+        if table_type == 'ascii':
+            form = 'A'+string_size_str
+        else:
+            form = string_size_str+'A'
+    else:
+        if table_type == 'ascii':
+            raise ValueError(
+                "Ascii table columns must be scalar, got %s" % str(d))
+        if isinstance(d[2], tuple):
+            # this is an array column.  the form
+            # should be total elements followed by A
+            count = reduce(lambda x, y: x*y, d[2])
+            count = string_size*count
+            form = '%dA' % count
+
+            # will have to do tests to see if this is the right order
+            dim = list(reversed(d[2]))
+            dim = [string_size_str] + [str(e) for e in dim]
+            dim = '(' + ','.join(dim)+')'
+        else:
+            # this is a vector (1d array) column
+            count = string_size*d[2]
+            form = '%dA' % count
+
+            # will have to do tests to see if this is the right order
+            dim = [string_size_str, str(d[2])]
+            dim = '(' + ','.join(dim)+')'
+
+    return name, form, dim
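+
+
+# As an illustration of _npy_string2fits above, for binary tables:
+#
+#     _npy_string2fits(('s', '|S5'))          # ('s', '5A', None)
+#     _npy_string2fits(('s', '|S5', 3))       # ('s', '15A', '(5,3)')
+#     _npy_string2fits(('s', '|S5', (2, 3)))  # ('s', '30A', '(5,3,2)')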
diff --git a/fitsio/header.py b/fitsio/header.py
new file mode 100644 (file)
index 0000000..4b4ebd7
--- /dev/null
@@ -0,0 +1,724 @@
+"""
+header classes for fitslib, part of the fitsio package.
+
+See the main docs at https://github.com/esheldon/fitsio
+
+  Copyright (C) 2011  Erin Sheldon, BNL.  erin dot sheldon at gmail dot com
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+"""
+from __future__ import with_statement, print_function
+import warnings
+
+from . import _fitsio_wrap
+from .util import isstring, FITSRuntimeWarning, IS_PY3
+
+# for python3 compat
+if IS_PY3:
+    xrange = range
+
+TYP_STRUC_KEY = 10
+TYP_CMPRS_KEY = 20
+TYP_SCAL_KEY = 30
+TYP_NULL_KEY = 40
+TYP_DIM_KEY = 50
+TYP_RANG_KEY = 60
+TYP_UNIT_KEY = 70
+TYP_DISP_KEY = 80
+TYP_HDUID_KEY = 90
+TYP_CKSUM_KEY = 100
+TYP_WCS_KEY = 110
+TYP_REFSYS_KEY = 120
+TYP_COMM_KEY = 130
+TYP_CONT_KEY = 140
+TYP_USER_KEY = 150
+
+
+class FITSHDR(object):
+    """
+    A class representing a FITS header.
+
+    parameters
+    ----------
+    record_list: optional
+        A list of dicts, or dict, or another FITSHDR
+          - list of dictionaries containing 'name','value' and optionally
+            a 'comment' field; the order is preserved.
+          - a dictionary of keyword-value pairs; no comments are written
+            in this case, and the order is arbitrary.
+          - another FITSHDR object; the order is preserved.
+
+    examples:
+
+        hdr=FITSHDR()
+
+        # set a simple value
+        hdr['blah'] = 35
+
+        # set from a dict to include a comment.
+        rec={'name':'fromdict', 'value':3, 'comment':'my comment'}
+        hdr.add_record(rec)
+
+        # can do the same with a full FITSRecord
+        rec=FITSRecord( {'name':'temp', 'value':35, 'comment':'temp in C'} )
+        hdr.add_record(rec)
+
+        # in the above, the record is replaced if one with the same name
+        # exists, except for COMMENT and HISTORY, which can exist as
+        # duplicates
+
+        # print the header
+        print(hdr)
+
+        # print a single record
+        print(hdr['fromdict'])
+
+
+        # can also set from a card
+        hdr.add_record('test    =                   77')
+        # using a FITSRecord object (internally uses FITSCard)
+        card=FITSRecord('test    =                   77')
+        hdr.add_record(card)
+
+        # can also construct with a record list
+        recs=[{'name':'test', 'value':35, 'comment':'a comment'},
+              {'name':'blah', 'value':'some string'}]
+        hdr=FITSHDR(recs)
+
+        # if you have no comments, you can construct with a simple dict
+        recs={'day':'saturday',
+              'telescope':'blanco'}
+        hdr=FITSHDR(recs)
+
+    """
+    def __init__(self, record_list=None):
+
+        self._record_list = []
+        self._record_map = {}
+        self._index_map = {}
+
+        if isinstance(record_list, FITSHDR):
+            for r in record_list.records():
+                self.add_record(r)
+        elif isinstance(record_list, dict):
+            for k in record_list:
+                r = {'name': k, 'value': record_list[k]}
+                self.add_record(r)
+        elif isinstance(record_list, list):
+            for r in record_list:
+                self.add_record(r)
+        elif record_list is not None:
+            raise ValueError("expected a dict or list of dicts or FITSHDR")
+
+    def add_record(self, record_in):
+        """
+        Add a new record.  Strip quotes from around strings.
+
+        This will over-write if the key already exists, except
+        for COMMENT and HISTORY fields
+
+        parameters
+        -----------
+        record:
+            The record, either a dict or a header card string
+            or a FITSRecord or FITSCard
+        """
+        if (isinstance(record_in, dict) and
+                'name' in record_in and 'value' in record_in):
+            record = {}
+            record.update(record_in)
+        else:
+            record = FITSRecord(record_in)
+
+        # only append when this name already exists if it is
+        # a comment or history field, otherwise simply over-write
+        key = record['name']
+        if key is not None:
+            key = key.upper()
+
+        key_exists = key in self._record_map
+
+        if not key_exists or key in ('COMMENT', 'HISTORY', 'CONTINUE', None):
+            # append new record
+            self._record_list.append(record)
+            index = len(self._record_list)-1
+            self._index_map[key] = index
+        else:
+            # over-write existing
+            index = self._index_map[key]
+            self._record_list[index] = record
+
+        self._record_map[key] = record
+
+    def _add_to_map(self, record):
+        key = record['name'].upper()
+        self._record_map[key] = record
+
+    def get_comment(self, item):
+        """
+        Get the comment for the requested entry
+        """
+        key = item.upper()
+        if key not in self._record_map:
+            raise KeyError("unknown record: %s" % key)
+
+        if 'comment' not in self._record_map[key]:
+            return None
+        else:
+            return self._record_map[key]['comment']
+
+    def records(self):
+        """
+        Return the list of full records as a list of dictionaries.
+        """
+        return self._record_list
+
+    def keys(self):
+        """
+        Return a copy of the current key list.
+        """
+        return [e['name'] for e in self._record_list]
+
+    def delete(self, name):
+        """
+        Delete the specified entry if it exists.  Names are matched
+        case-insensitively, consistent with how records are stored.
+        """
+        if isinstance(name, (list, tuple)):
+            for xx in name:
+                self.delete(xx)
+        else:
+            if name is not None:
+                name = name.upper()
+            if name in self._record_map:
+                del self._record_map[name]
+                self._record_list = [
+                    r for r in self._record_list
+                    if (r['name'].upper() if r['name'] is not None
+                        else None) != name]
+                # list positions shift after a removal, so rebuild
+                # the index map
+                self._index_map = {
+                    (r['name'].upper() if r['name'] is not None else None): i
+                    for i, r in enumerate(self._record_list)}
+
+    def clean(self, is_table=False):
+        """
+        Remove reserved keywords from the header.
+
+        These are keywords that the fits writer must write in order
+        to maintain consistency between header and data.
+
+        parameters
+        ----------
+        is_table: bool, optional
+            Set True if this is a table, so extra keywords will be cleaned
+        """
+
+        rmnames = [
+            'SIMPLE', 'EXTEND', 'XTENSION', 'BITPIX', 'PCOUNT', 'GCOUNT',
+            'THEAP',
+            'EXTNAME',
+            # 'BLANK',
+            'ZQUANTIZ', 'ZDITHER0', 'ZIMAGE', 'ZCMPTYPE',
+            'ZSIMPLE', 'ZTENSION', 'ZPCOUNT', 'ZGCOUNT',
+            'ZBITPIX', 'ZEXTEND',
+            # 'FZTILELN','FZALGOR',
+            'CHECKSUM', 'DATASUM']
+
+        if is_table:
+            # these are not allowed in tables
+            rmnames += [
+                'BUNIT', 'BSCALE', 'BZERO',
+            ]
+
+        self.delete(rmnames)
+
+        r = self._record_map.get('NAXIS', None)
+        if r is not None:
+            naxis = int(r['value'])
+            self.delete('NAXIS')
+
+            rmnames = ['NAXIS%d' % i for i in xrange(1, naxis+1)]
+            self.delete(rmnames)
+
+        r = self._record_map.get('ZNAXIS', None)
+        self.delete('ZNAXIS')
+        if r is not None:
+
+            znaxis = int(r['value'])
+
+            rmnames = ['ZNAXIS%d' % i for i in xrange(1, znaxis+1)]
+            self.delete(rmnames)
+            rmnames = ['ZTILE%d' % i for i in xrange(1, znaxis+1)]
+            self.delete(rmnames)
+            rmnames = ['ZNAME%d' % i for i in xrange(1, znaxis+1)]
+            self.delete(rmnames)
+            rmnames = ['ZVAL%d' % i for i in xrange(1, znaxis+1)]
+            self.delete(rmnames)
+
+        r = self._record_map.get('TFIELDS', None)
+        if r is not None:
+            tfields = int(r['value'])
+            self.delete('TFIELDS')
+
+            if tfields > 0:
+
+                nbase = [
+                    'TFORM', 'TTYPE', 'TDIM', 'TUNIT', 'TSCAL', 'TZERO',
+                    'TNULL', 'TDISP', 'TDMIN', 'TDMAX', 'TDESC', 'TROTA',
+                    'TRPIX', 'TRVAL', 'TDELT', 'TCUNI',
+                    # 'FZALG'
+                ]
+                for i in xrange(1, tfields+1):
+                    names = ['%s%d' % (n, i) for n in nbase]
+                    self.delete(names)
+
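+    # For example, for an image header with NAXIS=2, clean() removes
+    # SIMPLE, BITPIX, EXTEND, NAXIS, NAXIS1 and NAXIS2 (along with the
+    # other reserved keywords listed above), leaving user keywords intact.
+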
+    def get(self, item, default_value=None):
+        """
+        Get the requested header entry by keyword name
+        """
+
+        found, name = self._contains_and_name(item)
+        if found:
+            return self._record_map[name]['value']
+        else:
+            return default_value
+
+    def __len__(self):
+        return len(self._record_list)
+
+    def __contains__(self, item):
+        found, _ = self._contains_and_name(item)
+        return found
+
+    def _contains_and_name(self, item):
+
+        if isinstance(item, FITSRecord):
+            name = item['name']
+        elif isinstance(item, dict):
+            name = item.get('name', None)
+            if name is None:
+                raise ValueError("dict record must have 'name' field")
+        else:
+            name = item
+
+        found = False
+        if name is None:
+            if None in self._record_map:
+                found = True
+        else:
+            name = name.upper()
+            if name in self._record_map:
+                found = True
+            elif name[0:8] == 'HIERARCH':
+                if len(name) > 9:
+                    name = name[9:]
+                    if name in self._record_map:
+                        found = True
+
+        return found, name
+
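+    # For example, lookups are case-insensitive and understand HIERARCH:
+    # 'HIERARCH ESO OBS ID' is first tried as-is (upper-cased) and then,
+    # failing that, as the bare keyword 'ESO OBS ID'.
+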
+    def __setitem__(self, item, value):
+        if isinstance(value, (dict, FITSRecord)):
+            if item.upper() != value['name'].upper():
+                raise ValueError("when setting using a FITSRecord, the "
+                                 "name field must match")
+            rec = value
+        else:
+            rec = {'name': item, 'value': value}
+
+        try:
+            # the entry may already exist; if so, preserve the comment
+            comment = self.get_comment(item)
+            rec['comment'] = comment
+        except KeyError:
+            pass
+
+        self.add_record(rec)
+
+    def __getitem__(self, item):
+        if item not in self:
+            raise KeyError("unknown record: %s" % item)
+
+        return self.get(item)
+
+    def __iter__(self):
+        self._current = 0
+        return self
+
+    def next(self):
+        """
+        for iteration over the header entries
+        """
+        if self._current < len(self._record_list):
+            rec = self._record_list[self._current]
+            key = rec['name']
+            self._current += 1
+            return key
+        else:
+            raise StopIteration
+    __next__ = next
+
+    def _record2card(self, record):
+        """
+        when we add new records they don't have a card,
+        this sort of fakes it up similar to what cfitsio
+        does, just for display purposes.  e.g.
+
+            DBL     =            23.299843
+            LNG     =              3423432
+            KEYSNC  = 'hello   '
+            KEYSC   = 'hello   '           / a comment for string
+            KEYDC   =     3.14159265358979 / a comment for pi
+            KEYLC   =            323423432 / a comment for long
+
+        basically,
+            - 8 chars, left aligned, for the keyword name
+            - a space
+            - 20 chars for value, left aligned for strings, right aligned for
+              numbers
+            - if there is a comment, one space followed by / then another space
+              then the comment out to 80 chars
+
+        """
+        name = record['name']
+        value = record['value']
+        comment = record.get('comment', '')
+
+        v_isstring = isstring(value)
+
+        if name is None:
+            card = '         %s' % comment
+        elif name == 'COMMENT':
+            card = 'COMMENT %s' % comment
+        elif name == 'CONTINUE':
+            card = 'CONTINUE   %s' % value
+        elif name == 'HISTORY':
+            card = 'HISTORY   %s' % value
+        else:
+            if len(name) > 8:
+                card = 'HIERARCH %s= ' % name
+            else:
+                card = '%-8s= ' % name[0:8]
+
+            # these may be string representations of data, or actual strings
+            if v_isstring:
+                value = str(value)
+                if len(value) > 0:
+                    if value[0] != "'":
+                        # this is a string representing a string header field
+                        # make it look like it will look in the header
+                        value = "'" + value + "'"
+                        vstr = '%-20s' % value
+                    else:
+                        vstr = "%20s" % value
+                else:
+                    vstr = "''"
+            else:
+                if value is True:
+                    value = 'T'
+                elif value is False:
+                    value = 'F'
+
+                vstr = '%20s' % value
+
+            card += vstr
+
+            if 'comment' in record:
+                card += ' / %s' % record['comment']
+
+        if v_isstring and len(card) > 80:
+            card = card[0:79] + "'"
+        else:
+            card = card[0:80]
+
+        return card
+
+    def __repr__(self):
+        rep = ['']
+        for r in self._record_list:
+            card = self._record2card(r)
+            rep.append(card)
+        return '\n'.join(rep)
+
+
+class FITSRecord(dict):
+    """
+    Class to represent a FITS header record
+
+    parameters
+    ----------
+    record: string or dict
+        If a string, it should represent a FITS header card
+
+        If a dict it should have 'name' and 'value' fields.
+        Can have a 'comment' field.
+
+    examples
+    --------
+
+    # from a dict.  Can include a comment
+    rec=FITSRecord( {'name':'temp', 'value':35, 'comment':'temperature in C'} )
+
+    # from a card
+    card=FITSRecord('test    =                   77 / My comment')
+
+    """
+    def __init__(self, record):
+        self.set_record(record)
+
+    def set_record(self, record, **keys):
+        """
+        check the record is valid and set keys in the dict
+
+        parameters
+        ----------
+        record: string
+            Dict representing a record or a string representing a FITS header
+            card
+        """
+
+        if keys:
+            import warnings
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+        if isstring(record):
+            card = FITSCard(record)
+            self.update(card)
+
+            self.verify()
+
+        else:
+
+            if isinstance(record, FITSRecord):
+                self.update(record)
+            elif isinstance(record, dict):
+                if 'name' in record and 'value' in record:
+                    self.update(record)
+
+                elif 'card_string' in record:
+                    self.set_record(record['card_string'])
+
+                else:
+                    raise ValueError('record must have name,value fields '
+                                     'or a card_string field')
+            else:
+                raise ValueError("record must be a string card or "
+                                 "dictionary or FITSRecord")
+
+    def verify(self):
+        """
+        make sure name,value exist
+        """
+        if 'name' not in self:
+            raise ValueError("each record must have a 'name' field")
+        if 'value' not in self:
+            raise ValueError("each record must have a 'value' field")
+
+
+_BLANK = '       '
+
+
+class FITSCard(FITSRecord):
+    """
+    class to represent ordinary FITS cards.
+
+    CONTINUE not supported
+
+    examples
+    --------
+
+    # from a card
+    card=FITSRecord('test    =                   77 / My comment')
+    """
+    def __init__(self, card_string):
+        self.set_card(card_string)
+
+    def set_card(self, card_string):
+        self['card_string'] = card_string
+
+        self._check_hierarch()
+
+        if self._is_hierarch:
+            self._set_as_key()
+        else:
+            self._check_equals()
+
+            self._check_type()
+            self._check_len()
+
+            front = card_string[0:7]
+            if (not self.has_equals() or
+                    front in ['COMMENT', 'HISTORY', 'CONTINU', _BLANK]):
+
+                if front == 'HISTORY':
+                    self._set_as_history()
+                elif front == 'CONTINU':
+                    self._set_as_continue()
+                elif front == _BLANK:
+                    self._set_as_blank()
+                else:
+                    # note: anything without an '=' that is not HISTORY,
+                    # CONTINUE, or a blank key is treated as COMMENT;
+                    # cfitsio behaves the same way
+                    self._set_as_comment()
+
+                if self.has_equals():
+                    mess = (
+                        "warning: It is not FITS-compliant for a %s header "
+                        "card to include an = sign. There may be slight "
+                        "inconsistencies if you write this back out to a "
+                        "file.")
+                    mess = mess % (card_string[:8])
+                    warnings.warn(mess, FITSRuntimeWarning)
+            else:
+                self._set_as_key()
+
+    def has_equals(self):
+        """
+        True if '=' is at position 8 (0-indexed, i.e. the 9th character)
+        """
+        return self._has_equals
+
+    def _check_hierarch(self):
+        card_string = self['card_string']
+        if card_string[0:8].upper() == 'HIERARCH':
+            self._is_hierarch = True
+        else:
+            self._is_hierarch = False
+
+    def _check_equals(self):
+        """
+        check for '=' at position 8 (0-indexed), set attribute _has_equals
+        """
+        card_string = self['card_string']
+        if len(card_string) < 9:
+            self._has_equals = False
+        elif card_string[8] == '=':
+            self._has_equals = True
+        else:
+            self._has_equals = False
+
+    def _set_as_key(self):
+        card_string = self['card_string']
+        res = _fitsio_wrap.parse_card(card_string)
+        if len(res) == 5:
+            keyclass, name, value, dtype, comment = res
+        else:
+            keyclass, name, dtype, comment = res
+            value = None
+
+        if keyclass == TYP_CONT_KEY:
+            raise ValueError("bad card '%s'.  CONTINUE not "
+                             "supported" % card_string)
+
+        self['class'] = keyclass
+        self['name'] = name
+        self['value_orig'] = value
+        self['value'] = self._convert_value(value)
+        self['dtype'] = dtype
+        self['comment'] = comment
+
+    def _set_as_blank(self):
+        self['class'] = TYP_USER_KEY
+        self['name'] = None
+        self['value'] = None
+        self['comment'] = self['card_string'][8:]
+
+    def _set_as_comment(self):
+        comment = self._extract_comm_or_hist_value()
+
+        self['class'] = TYP_COMM_KEY
+        self['name'] = 'COMMENT'
+        self['value'] = comment
+
+    def _set_as_history(self):
+        history = self._extract_comm_or_hist_value()
+
+        self['class'] = TYP_COMM_KEY
+        self['name'] = 'HISTORY'
+        self['value'] = history
+
+    def _set_as_continue(self):
+        value = self._extract_comm_or_hist_value()
+
+        self['class'] = TYP_CONT_KEY
+        self['name'] = 'CONTINUE'
+        self['value'] = value
+
+    def _convert_value(self, value_orig):
+        """
+        things like 6 and 1.25 are converted with ast.literal_eval
+
+        Things like 'hello' are stripped of quotes
+        """
+        import ast
+        if value_orig is None:
+            return value_orig
+
+        if value_orig.startswith("'") and value_orig.endswith("'"):
+            value = value_orig[1:-1]
+        else:
+
+            try:
+                avalue = ast.parse(value_orig).body[0].value
+                if isinstance(avalue, ast.BinOp):
+                    # this is probably a string that happens to look like
+                    # a binary operation, e.g. '25-3'
+                    value = value_orig
+                else:
+                    value = ast.literal_eval(value_orig)
+            except Exception:
+                value = self._convert_string(value_orig)
+
+            if isinstance(value, int) and '_' in value_orig:
+                value = value_orig
+
+        return value
+
+    def _convert_string(self, s):
+        if s == 'T':
+            return True
+        elif s == 'F':
+            return False
+        else:
+            return s
+
+    def _extract_comm_or_hist_value(self):
+        card_string = self['card_string']
+        if self._has_equals:
+            if len(card_string) >= 9:
+                value = card_string[9:]
+            else:
+                value = ''
+        else:
+            if len(card_string) >= 8:
+                value = card_string[8:]
+            else:
+                value = ''
+        return value
+
+    def _check_type(self):
+        card_string = self['card_string']
+        if not isstring(card_string):
+            raise TypeError(
+                "card must be a string, got type %s" % type(card_string))
+
+    def _check_len(self):
+        ln = len(self['card_string'])
+        if ln > 80:
+            mess = "len(card) is %d.  cards must have length < 80"
+            raise ValueError(mess)
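+
+# A minimal usage sketch (illustrative only, not exercised by the library):
+#
+#     card = FITSCard("test    =                   77 / My comment")
+#     card['name']    -> 'test'
+#     card['value']   -> 77
+#     card['comment'] -> 'My comment'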
diff --git a/fitsio/test.py b/fitsio/test.py
new file mode 100644 (file)
index 0000000..49d5e4c
--- /dev/null
@@ -0,0 +1,3039 @@
+from __future__ import with_statement, print_function
+import sys
+import os
+import tempfile
+import warnings
+import numpy
+from numpy import arange, array
+from pkg_resources import resource_filename
+import fitsio
+
+from ._fitsio_wrap import cfitsio_use_standard_strings
+
+import unittest
+
+try:
+    xrange = xrange
+except NameError:
+    # python 3 has no xrange; fall back to range
+    xrange = range
+
+lorem_ipsum = (
+    'Lorem ipsum dolor sit amet, consectetur adipiscing '
+    'elit, sed do eiusmod tempor incididunt ut labore '
+    'et dolore magna aliqua'
+)
+def test():
+    suite_warnings = unittest.TestLoader().loadTestsFromTestCase(TestWarnings)
+    res1=unittest.TextTestRunner(verbosity=2).run(suite_warnings).wasSuccessful()
+
+    suite = unittest.TestLoader().loadTestsFromTestCase(TestReadWrite)
+    res2=unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
+
+    if not res1 or not res2:
+        sys.exit(1)
+
+class TestWarnings(unittest.TestCase):
+    """
+    tests of warnings
+
+    TODO: write test cases for bad column size
+    """
+    def setUp(self):
+        pass
+
+    def testNonStandardKeyValue(self):
+        fname=tempfile.mktemp(prefix='fitsio-TestWarning-',suffix='.fits')
+
+        im=numpy.zeros( (3,3) )
+        with warnings.catch_warnings(record=True) as w:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write(im)
+                # now write a key with a non-standard value
+                value={'test':3}
+                fits[-1].write_key("odd",value)
+
+            # DeprecationWarnings have crept into the warning list; filter
+            # it down to just the FITSRuntimeWarning instances.
+            # @at88mph  2019.10.09
+            filtered_warnings = [
+                wi for wi in w
+                if 'FITSRuntimeWarning' in '{}'.format(wi.category)
+            ]
+
+            assert len(filtered_warnings) == 1, (
+                'Wrong length of output (Expected {} but got '
+                '{}.)'.format(1, len(filtered_warnings)))
+            assert issubclass(filtered_warnings[-1].category, fitsio.FITSRuntimeWarning)
+
+class TestReadWrite(unittest.TestCase):
+
+    def setUp(self):
+
+        nvec = 2
+        ashape=(21,21)
+        Sdtype = 'S6'
+        Udtype = 'U6'
+
+        # all currently available types, scalar, 1-d and 2-d array columns
+        dtype=[
+            ('u1scalar','u1'),
+            ('i1scalar','i1'),
+            ('b1scalar','?'),
+            ('u2scalar','u2'),
+            ('i2scalar','i2'),
+            ('u4scalar','u4'),
+            ('i4scalar','<i4'), # mix the byte orders a bit, test swapping
+            ('i8scalar','i8'),
+            ('f4scalar','f4'),
+            ('f8scalar','>f8'),
+            ('c8scalar','c8'), # complex, two 32-bit
+            ('c16scalar','c16'), # complex, two 64-bit
+
+            ('u1vec','u1',nvec),
+            ('i1vec','i1',nvec),
+            ('b1vec','?',nvec),
+            ('u2vec','u2',nvec),
+            ('i2vec','i2',nvec),
+            ('u4vec','u4',nvec),
+            ('i4vec','i4',nvec),
+            ('i8vec','i8',nvec),
+            ('f4vec','f4',nvec),
+            ('f8vec','f8',nvec),
+            ('c8vec','c8',nvec),
+            ('c16vec','c16',nvec),
+
+            ('u1arr','u1',ashape),
+            ('i1arr','i1',ashape),
+            ('b1arr','?',ashape),
+            ('u2arr','u2',ashape),
+            ('i2arr','i2',ashape),
+            ('u4arr','u4',ashape),
+            ('i4arr','i4',ashape),
+            ('i8arr','i8',ashape),
+            ('f4arr','f4',ashape),
+            ('f8arr','f8',ashape),
+            ('c8arr','c8',ashape),
+            ('c16arr','c16',ashape),
+
+            # special case of (1,)
+            ('f8arr_dim1','f8',(1,)),
+
+
+            ('Sscalar',Sdtype),
+            ('Svec',   Sdtype, nvec),
+            ('Sarr',   Sdtype, ashape),
+        ]
+
+        if cfitsio_use_standard_strings():
+            dtype += [
+                ('Sscalar_nopad',Sdtype),
+                ('Svec_nopad',   Sdtype, nvec),
+                ('Sarr_nopad',   Sdtype, ashape),
+            ]
+
+        if sys.version_info >= (3, 0, 0):
+            dtype += [
+               ('Uscalar',Udtype),
+               ('Uvec',   Udtype, nvec),
+               ('Uarr',   Udtype, ashape),
+            ]
+
+            if cfitsio_use_standard_strings():
+                dtype += [
+                   ('Uscalar_nopad',Udtype),
+                   ('Uvec_nopad',   Udtype, nvec),
+                   ('Uarr_nopad',   Udtype, ashape),
+                ]
+
+
+        dtype2=[('index','i4'),
+                ('x','f8'),
+                ('y','f8')]
+
+        nrows=4
+        data=numpy.zeros(nrows, dtype=dtype)
+
+        dtypes=['u1','i1','u2','i2','u4','i4','i8','f4','f8','c8','c16']
+        for t in dtypes:
+            if t in ['c8','c16']:
+                data[t+'scalar'] = [complex(i+1,(i+1)*2) for i in xrange(nrows)]
+                vname=t+'vec'
+                for row in xrange(nrows):
+                    for i in xrange(nvec):
+                        index=(row+1)*(i+1)
+                        data[vname][row,i] = complex(index,index*2)
+                aname=t+'arr'
+                for row in xrange(nrows):
+                    for i in xrange(ashape[0]):
+                        for j in xrange(ashape[1]):
+                            index=(row+1)*(i+1)*(j+1)
+                            data[aname][row,i,j] = complex(index,index*2)
+
+            else:
+                data[t+'scalar'] = 1 + numpy.arange(nrows, dtype=t)
+                data[t+'vec'] = 1 + numpy.arange(nrows*nvec,dtype=t).reshape(nrows,nvec)
+                arr = 1 + numpy.arange(nrows*ashape[0]*ashape[1],dtype=t)
+                data[t+'arr'] = arr.reshape(nrows,ashape[0],ashape[1])
+
+        for t in ['b1']:
+            data[t+'scalar'] = (numpy.arange(nrows) % 2 == 0).astype('?')
+            data[t+'vec'] = (numpy.arange(nrows*nvec) % 2 == 0).astype('?').reshape(nrows,nvec)
+            arr = (numpy.arange(nrows*ashape[0]*ashape[1]) % 2 == 0).astype('?')
+            data[t+'arr'] = arr.reshape(nrows,ashape[0],ashape[1])
+
+
+        # strings get padded when written to the fits file.  And the way I do
+        # the read, I read all bytes (a la mrdfits) so the spaces are
+        # preserved.
+        #
+        # so we need to pad out the strings with blanks so we can compare
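+        #
+        # e.g. '%-6s' % 'bye' -> 'bye   ' (left justified, blank padded to 6)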
+
+        data['Sscalar'] = ['%-6s' % s for s in ['hello','world','good','bye']]
+        data['Svec'][:,0] = '%-6s' % 'hello'
+        data['Svec'][:,1] = '%-6s' % 'world'
+
+
+        s = 1 + numpy.arange(nrows*ashape[0]*ashape[1])
+        s = ['%-6s' % el for el in s]
+        data['Sarr'] = numpy.array(s).reshape(nrows,ashape[0],ashape[1])
+
+        if cfitsio_use_standard_strings():
+            data['Sscalar_nopad'] = ['hello','world','good','bye']
+            data['Svec_nopad'][:,0] = 'hello'
+            data['Svec_nopad'][:,1] = 'world'
+
+            s = 1 + numpy.arange(nrows*ashape[0]*ashape[1])
+            s = ['%s' % el for el in s]
+            data['Sarr_nopad'] = numpy.array(s).reshape(nrows,ashape[0],ashape[1])
+
+        if sys.version_info >= (3, 0, 0):
+            data['Uscalar'] = ['%-6s' % s for s in ['hello','world','good','bye']]
+            data['Uvec'][:,0] = '%-6s' % 'hello'
+            data['Uvec'][:,1] = '%-6s' % 'world'
+
+            s = 1 + numpy.arange(nrows*ashape[0]*ashape[1])
+            s = ['%-6s' % el for el in s]
+            data['Uarr'] = numpy.array(s).reshape(nrows,ashape[0],ashape[1])
+
+
+            if cfitsio_use_standard_strings():
+                data['Uscalar_nopad'] = ['hello','world','good','bye']
+                data['Uvec_nopad'][:,0] = 'hello'
+                data['Uvec_nopad'][:,1] = 'world'
+
+                s = 1 + numpy.arange(nrows*ashape[0]*ashape[1])
+                s = ['%s' % el for el in s]
+                data['Uarr_nopad'] = numpy.array(s).reshape(nrows,ashape[0],ashape[1])
+
+        self.data = data
+
+        # use a dict list so we can have comments
+        # for the long keyword we use the largest possible value
+        self.keys = [{'name':'test1','value':35},
+                     {'name':'empty','value':''},
+                     {'name':'long_keyword_name','value':'stuff'},
+                     {'name':'test2','value':'stuff','comment':'this is a string keyword'},
+                     {'name':'dbl', 'value':23.299843,'comment':"this is a double keyword"},
+                     {'name':'edbl', 'value':1.384123233e+43,'comment':"double keyword with exponent"},
+                     {'name':'lng','value':2**63-1,'comment':'this is a long keyword'},
+                     {'name':'lngstr','value':lorem_ipsum,'comment':'long string'}]
+
+        # a second extension using the convenience function
+        nrows2=10
+        data2 = numpy.zeros(nrows2, dtype=dtype2)
+        data2['index'] = numpy.arange(nrows2,dtype='i4')
+        data2['x'] = numpy.arange(nrows2,dtype='f8')
+        data2['y'] = numpy.arange(nrows2,dtype='f8')
+        self.data2 = data2
+
+        #
+        # ascii table
+        #
+
+        nvec = 2
+        ashape = (2,3)
+        Sdtype = 'S6'
+        Udtype = 'U6'
+
+        # we support writing i2, i4, i8, f4, f8, but when reading, cfitsio
+        # always reports the types as i4 and f8, so we can't really use i8
+        # and we are forced to read all floats at f8 precision
+
+        adtype=[('i2scalar','i2'),
+                ('i4scalar','i4'),
+                #('i8scalar','i8'),
+                ('f4scalar','f4'),
+                ('f8scalar','f8'),
+                ('Sscalar',Sdtype)]
+        if sys.version_info >= (3, 0, 0):
+            adtype += [('Uscalar', Udtype)]
+
+        nrows=4
+        try:
+            tdt = numpy.dtype(adtype, align=True)
+        except TypeError: # older numpy may not understand `align` argument
+            tdt = numpy.dtype(adtype)
+        adata=numpy.zeros(nrows, dtype=tdt)
+
+        adata['i2scalar'][:] = -32222  + numpy.arange(nrows,dtype='i2')
+        adata['i4scalar'][:] = -1353423423 + numpy.arange(nrows,dtype='i4')
+        #adata['i8scalar'][:] = -9223372036854775807 + numpy.arange(nrows,dtype='i8')
+        adata['f4scalar'][:] = -2.55555555555555555555555e35 + numpy.arange(nrows,dtype='f4')*1.e35
+        adata['f8scalar'][:] = -2.55555555555555555555555e110 + numpy.arange(nrows,dtype='f8')*1.e110
+        adata['Sscalar'] = ['hello','world','good','bye']
+
+        if sys.version_info >= (3, 0, 0):
+            adata['Uscalar'] = ['hello','world','good','bye']
+
+        self.ascii_data = adata
+
+
+
+        #
+        # for variable length columns
+        #
+
+        # all currently available types, scalar, 1-d and 2-d array columns
+        dtype=[
+            ('u1scalar','u1'),
+            ('u1obj','O'),
+            ('i1scalar','i1'),
+            ('i1obj','O'),
+            ('u2scalar','u2'),
+            ('u2obj','O'),
+            ('i2scalar','i2'),
+            ('i2obj','O'),
+            ('u4scalar','u4'),
+            ('u4obj','O'),
+            ('i4scalar','<i4'), # mix the byte orders a bit, test swapping
+            ('i4obj','O'),
+            ('i8scalar','i8'),
+            ('i8obj','O'),
+            ('f4scalar','f4'),
+            ('f4obj','O'),
+            ('f8scalar','>f8'),
+            ('f8obj','O'),
+
+            ('u1vec','u1',nvec),
+            ('i1vec','i1',nvec),
+            ('u2vec','u2',nvec),
+            ('i2vec','i2',nvec),
+            ('u4vec','u4',nvec),
+            ('i4vec','i4',nvec),
+            ('i8vec','i8',nvec),
+            ('f4vec','f4',nvec),
+            ('f8vec','f8',nvec),
+
+            ('u1arr','u1',ashape),
+            ('i1arr','i1',ashape),
+            ('u2arr','u2',ashape),
+            ('i2arr','i2',ashape),
+            ('u4arr','u4',ashape),
+            ('i4arr','i4',ashape),
+            ('i8arr','i8',ashape),
+            ('f4arr','f4',ashape),
+            ('f8arr','f8',ashape),
+
+            # special case of (1,)
+            ('f8arr_dim1','f8',(1,)),
+
+            ('Sscalar',Sdtype),
+            ('Sobj','O'),
+            ('Svec',   Sdtype, nvec),
+            ('Sarr',   Sdtype, ashape),
+        ]
+
+        if sys.version_info >= (3, 0, 0):
+            dtype += [
+               ('Uscalar',Udtype),
+               ('Uvec',   Udtype, nvec),
+               ('Uarr',   Udtype, ashape)]
+
+        dtype2=[('index','i4'),
+                ('x','f8'),
+                ('y','f8')]
+
+        nrows=4
+        data=numpy.zeros(nrows, dtype=dtype)
+
+        for t in ['u1','i1','u2','i2','u4','i4','i8','f4','f8']:
+            data[t+'scalar'] = 1 + numpy.arange(nrows, dtype=t)
+            data[t+'vec'] = 1 + numpy.arange(nrows*nvec,dtype=t).reshape(nrows,nvec)
+            arr = 1 + numpy.arange(nrows*ashape[0]*ashape[1],dtype=t)
+            data[t+'arr'] = arr.reshape(nrows,ashape[0],ashape[1])
+
+            for i in xrange(nrows):
+                data[t+'obj'][i] = data[t+'vec'][i]
+
+
+        # strings get padded when written to the fits file.  And the way I do
+        # the read, I read all bytes (a la mrdfits) so the spaces are
+        # preserved.
+        #
+        # so we need to pad out the strings with blanks before comparing
+
+        data['Sscalar'] = ['%-6s' % s for s in ['hello','world','good','bye']]
+        data['Svec'][:,0] = '%-6s' % 'hello'
+        data['Svec'][:,1] = '%-6s' % 'world'
+
+        s = 1 + numpy.arange(nrows*ashape[0]*ashape[1])
+        s = ['%-6s' % el for el in s]
+        data['Sarr'] = numpy.array(s).reshape(nrows,ashape[0],ashape[1])
+
+        if sys.version_info >= (3, 0, 0):
+            data['Uscalar'] = ['%-6s' % s for s in ['hello','world','good','bye']]
+            data['Uvec'][:,0] = '%-6s' % 'hello'
+            data['Uvec'][:,1] = '%-6s' % 'world'
+
+            s = 1 + numpy.arange(nrows*ashape[0]*ashape[1])
+            s = ['%-6s' % el for el in s]
+            data['Uarr'] = numpy.array(s).reshape(nrows,ashape[0],ashape[1])
+
+        for i in xrange(nrows):
+            data['Sobj'][i] = data['Sscalar'][i].rstrip()
+
+        self.vardata = data
+
+        #
+        # for bitcol columns
+        #
+        nvec = 2
+        ashape=(21,21)
+
+        dtype=[('b1vec','?',nvec),
+
+               ('b1arr','?',ashape)]
+
+        nrows=4
+        data=numpy.zeros(nrows, dtype=dtype)
+
+        for t in ['b1']:
+            data[t+'vec'] = (numpy.arange(nrows*nvec) % 2 == 0).astype('?').reshape(nrows,nvec)
+            arr = (numpy.arange(nrows*ashape[0]*ashape[1]) % 2 == 0).astype('?')
+            data[t+'arr'] = arr.reshape(nrows,ashape[0],ashape[1])
+
+        self.bdata = data
+
+
+    def testHeaderWriteRead(self):
+        """
+        Test a basic header write and read
+
+        Note the other read/write tests also are checking header writing with
+        a list of dicts
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-HeaderWrite-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                data=numpy.zeros(10)
+                header={
+                    'x':35,
+                    'y':88.215,
+                    'eval':1.384123233e+43,
+                    'empty':'',
+                    'funky':'35-8', # test old bug when strings look
+                                    # like expressions
+                    'name':'J. Smith',
+                    'what': '89113e6', # test bug where converted to float
+                    'und':None,
+                    'binop':'25-3', # test string with binary operation in it
+                    'unders':'1_000_000', # test string with underscore
+                    'longs':lorem_ipsum,
+                }
+                fits.write_image(data, header=header)
+
+                rh = fits[0].read_header()
+                self.check_header(header, rh)
+
+            with fitsio.FITS(fname) as fits:
+                rh = fits[0].read_header()
+                self.check_header(header, rh)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testReadHeaderCase(self):
+        """
+        Test read_header with and without case sensitivity
+
+        We need a special test for this because the read_header
+        code is optimized for speed and takes a different code path
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-HeaderCase-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                data=numpy.zeros(10)
+                fits.write_image(data, header=self.keys, extname='First')
+                fits.write_image(data, header=self.keys, extname='second')
+
+            cases = [
+                ('First',True),
+                ('FIRST',False),
+                ('second',True),
+                ('seConD',False),
+            ]
+            for ext,ci in cases:
+                h = fitsio.read_header(fname,ext=ext,case_sensitive=ci)
+                self.compare_headerlist_header(self.keys, h)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def testHeaderCommentPreserved(self):
+        """
+        Test that the comment is preserved after resetting the value
+        """
+
+        l1 = 'KEY1    =                   77 / My comment1'
+        l2 = 'KEY2    =                   88 / My comment2'
+        hdr=fitsio.FITSHDR()
+        hdr.add_record(l1)
+        hdr.add_record(l2)
+
+        hdr['key1'] = 99
+        self.assertEqual(hdr.get_comment('key1'), 'My comment1',
+                         'comment not preserved')
+
+    def testBlankKeyComments(self):
+        """
+        test a few different comments
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-HeaderComments-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                records = [
+                    # empty should return empty
+                    {'name':None, 'value':'', 'comment':''},
+                    # this will also return empty
+                    {'name':None, 'value':'', 'comment':' '},
+                    # this will return exactly
+                    {'name':None, 'value':'', 'comment':' h'},
+                    # this will return exactly
+                    {'name':None, 'value':'', 'comment':'--- test comment ---'},
+                ]
+                header = fitsio.FITSHDR(records)
+
+                fits.write(None, header=header)
+
+                rh = fits[0].read_header()
+
+                rrecords = rh.records()
+
+                for i, ri in ((0, 6), (1, 7), (2, 8), (3, 9)):
+                    rec = records[i]
+                    rrec = rrecords[ri]
+
+                    self.assertEqual(
+                        rec['name'],
+                        None,
+                        'checking name is None',
+                    )
+                    comment = rec['comment']
+                    rcomment = rrec['comment']
+                    if '' == comment.strip():
+                        comment = ''
+
+                    self.assertEqual(
+                        comment,
+                        rcomment,
+                        "check empty key comment",
+                    )
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testBlankKeyCommentsFromCards(self):
+        """
+        test a few different comments
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-HeaderComments-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                records = [
+                    '                                                                                ',
+                    '         --- testing comment ---                                                ',
+                    '        --- testing comment ---                                                 ',
+                    "COMMENT testing                                                                 ",
+                ]
+                header = fitsio.FITSHDR(records)
+
+                fits.write(None, header=header)
+
+                rh = fits[0].read_header()
+
+                rrecords = rh.records()
+
+                self.assertEqual(
+                    rrecords[6]['name'],
+                    None,
+                    'checking name is None',
+                )
+                self.assertEqual(
+                    rrecords[6]['comment'],
+                    '',
+                    "check empty key comment",
+                )
+                self.assertEqual(
+                    rrecords[7]['name'],
+                    None,
+                    'checking name is None',
+                )
+                self.assertEqual(
+                    rrecords[7]['comment'],
+                    ' --- testing comment ---',
+                    "check empty key comment",
+                )
+                self.assertEqual(
+                    rrecords[8]['name'],
+                    None,
+                    'checking name is None',
+                )
+                self.assertEqual(
+                    rrecords[8]['comment'],
+                    '--- testing comment ---',
+                    "check empty key comment",
+                )
+
+
+                self.assertEqual(
+                    rrecords[9]['name'],
+                    'COMMENT',
+                    'checking name is COMMENT',
+                )
+                self.assertEqual(
+                    rrecords[9]['comment'],
+                    'testing',
+                    "check comment",
+                )
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def testHeaderFromCards(self):
+        """
+        test generating a header from cards, writing it out and getting
+        back what we put in
+        """
+        hdr_from_cards=fitsio.FITSHDR([
+            "IVAL    =                   35 / integer value                                  ",
+            "SHORTS  = 'hello world'                                                         ",
+            "UND     =                                                                       ",
+            "LONGS   = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiu&'",
+            "CONTINUE  'smod tempor incididunt ut labore et dolore magna aliqua'             ",
+            "DBL     =                 1.25                                                  ",
+        ])
+        header = [
+            {'name':'ival','value':35,'comment':'integer value'},
+            {'name':'shorts','value':'hello world'},
+            {'name':'und','value':None},
+            {'name':'longs','value':lorem_ipsum},
+            {'name':'dbl','value':1.25},
+        ]
+
+        fname=tempfile.mktemp(prefix='fitsio-HeaderFromCards-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                data=numpy.zeros(10)
+                fits.write_image(data, header=hdr_from_cards)
+
+                rh = fits[0].read_header()
+                self.compare_headerlist_header(header, rh)
+
+            with fitsio.FITS(fname) as fits:
+                rh = fits[0].read_header()
+                self.compare_headerlist_header(header, rh)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testHeaderJunk(self):
+        """
+        test lenient treatment of garbage written by IDL mwrfits
+        """
+
+        data="""SIMPLE  =                    T /Primary Header created by MWRFITS v1.11         BITPIX  =                   16 /                                                NAXIS   =                    0 /                                                EXTEND  =                    T /Extensions may be present                       BLAT    =                    1 /integer                                         FOO     =              1.00000 /float (or double?)                              BAR     =                  NAN /float NaN                                       BIZ     =                  NaN /double NaN                                      BAT     =                  INF /1.0 / 0.0                                       BOO     =                 -INF /-1.0 / 0.0                                      QUAT    = '        '           /blank string                                    QUIP    = '1.0     '           /number in quotes                                QUIZ    = ' 1.0    '           /number in quotes with a leading space           QUIL    = 'NaN     '           /NaN in quotes                                   QUID    = 'Inf     '           /Inf in quotes                                   END                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                             """ # noqa
+
+        fname=tempfile.mktemp(prefix='fitsio-HeaderJunk-',suffix='.fits')
+        try:
+            with open(fname,'w') as fobj:
+                fobj.write(data)
+
+            h = fitsio.read_header(fname)
+            self.assertEqual(h['bar'],'NAN', "NAN garbage")
+            self.assertEqual(h['biz'],'NaN', "NaN garbage")
+            self.assertEqual(h['bat'],'INF', "INF garbage")
+            self.assertEqual(h['boo'],'-INF', "-INF garbage")
+            self.assertEqual(h['quat'], '', 'blank')
+            self.assertEqual(h['quip'], '1.0', '1.0 in quotes')
+            self.assertEqual(h['quiz'], ' 1.0', '1.0 in quotes')
+            self.assertEqual(h['quil'], 'NaN', 'NaN in quotes')
+            self.assertEqual(h['quid'], 'Inf', 'Inf in quotes')
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def testHeaderTemplate(self):
+        """
+        test adding bunch of cards from a split template
+        """
+
+        header_template = """SIMPLE  =                    T /
+BITPIX  =                    8 / bits per data value
+NAXIS   =                    0 / number of axes
+EXTEND  =                    T / Extensions are permitted
+ORIGIN  = 'LSST DM Header Service'/ FITS file originator
+
+         ---- Date, night and basic image information ----
+DATE    =                      / Creation Date and Time of File
+DATE-OBS=                      / Date of the observation (image acquisition)
+DATE-BEG=                      / Time at the start of integration
+DATE-END=                      / end date of the observation
+MJD     =                      / Modified Julian Date that the file was written
+MJD-OBS =                      / Modified Julian Date of observation
+MJD-BEG =                      / Modified Julian Date derived from DATE-BEG
+MJD-END =                      / Modified Julian Date derived from DATE-END
+OBSID   =                      / ImageName from Camera StartIntergration
+GROUPID =                      / imageSequenceName from StartIntergration
+OBSTYPE =                      / BIAS, DARK, FLAT, OBJECT
+BUNIT   = 'adu     '           / Brightness units for pixel array
+
+         ---- Telescope info, location, observer ----
+TELESCOP= 'LSST AuxTelescope'  / Telescope name
+INSTRUME= 'LATISS'             / Instrument used to obtain these data
+OBSERVER= 'LSST'               / Observer name(s)
+OBS-LONG=           -70.749417 / [deg] Observatory east longitude
+OBS-LAT =           -30.244639 / [deg] Observatory latitude
+OBS-ELEV=               2663.0 / [m] Observatory elevation
+OBSGEO-X=           1818938.94 / [m] X-axis Geocentric coordinate
+OBSGEO-Y=          -5208470.95 / [m] Y-axis Geocentric coordinate
+OBSGEO-Z=          -3195172.08 / [m] Z-axis Geocentric coordinate
+
+        ---- Pointing info, etc. ----
+
+DECTEL  =                      / Telescope DEC of observation
+ROTPATEL=                      / Telescope Rotation
+ROTCOORD= 'sky'                / Telescope Rotation Coordinates
+RA      =                      / RA of Target
+DEC     =                      / DEC of Target
+ROTPA   =                      / Rotation angle relative to the sky (deg)
+HASTART =                      / [HH:MM:SS] Telescope hour angle at start
+ELSTART =                      / [deg] Telescope zenith distance at start
+AZSTART =                      / [deg] Telescope azimuth angle at start
+AMSTART =                      / Airmass at start
+HAEND   =                      / [HH:MM:SS] Telescope hour angle at end
+ELEND   =                      / [deg] Telescope zenith distance at end
+AZEND   =                      / [deg] Telescope azimuth angle at end
+AMEND   =                      / Airmass at end
+
+        ---- Image-identifying used to build OBS-ID ----
+TELCODE = 'AT'                 / The code for the telecope
+CONTRLLR=                      / The controller (e.g. O for OCS, C for CCS)
+DAYOBS  =                      / The observation day as defined by image name
+SEQNUM  =                      / The sequence number from the image name
+GROUPID =                      /
+
+        ---- Information from Camera
+CCD_MANU= 'ITL'                / CCD Manufacturer
+CCD_TYPE= '3800C'              / CCD Model Number
+CCD_SERN= '20304'              / Manufacturers? CCD Serial Number
+LSST_NUM= 'ITL-3800C-098'      / LSST Assigned CCD Number
+SEQCKSUM=                      / Checksum of Sequencer
+SEQNAME =                      / SequenceName from Camera StartIntergration
+REBNAME =                      / Name of the REB
+CONTNUM =                      / CCD Controller (WREB) Serial Number
+IMAGETAG=                      / DAQ Image id
+TEMP_SET=                      / Temperature set point (deg C)
+CCDTEMP =                      / Measured temperature (deg C)
+
+        ---- Geometry from Camera ----
+DETSIZE =                      / Size of sensor
+OVERH   =                      / Over-scan pixels
+OVERV   =                      / Vert-overscan pix
+PREH    =                      / Pre-scan pixels
+
+        ---- Filter/grating information ----
+FILTER  =                      / Name of the filter
+FILTPOS =                      / Filter position
+GRATING =                      / Name of the second disperser
+GRATPOS =                      / disperser position
+LINSPOS =                      / Linear Stage
+
+        ---- Exposure-related information ----
+EXPTIME =                      / Exposure time in seconds
+SHUTTIME=                      / Shutter exposure time in seconds
+DARKTIME=                      / Dark time in seconds
+
+        ---- Header information ----
+FILENAME=                      / Original file name
+HEADVER =                      / Version of header
+
+        ---- Checksums ----
+CHECKSUM=                      / checksum for the current HDU
+DATASUM =                      / checksum of the data records\n"""
+
+        lines = header_template.splitlines()
+        hdr = fitsio.FITSHDR()
+        for l in lines:
+            hdr.add_record(l)
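+
+        # spot-check a few parsed values (a sketch of the expected parse;
+        # blank-valued cards such as DATE should read back as None)
+        assert hdr['BITPIX'] == 8
+        assert hdr['ORIGIN'] == 'LSST DM Header Service'
+        assert hdr['DATE'] is None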
+
+    def testCorruptContinue(self):
+        """
+        test with corrupt continue, just make sure it doesn't crash
+        """
+        with warnings.catch_warnings(record=True) as w:
+            fname=tempfile.mktemp(prefix='fitsio-TestCorruptContinue-',suffix='.fits')
+
+            hdr_from_cards=fitsio.FITSHDR([
+                "IVAL    =                   35 / integer value                                  ",
+                "SHORTS  = 'hello world'                                                         ",
+                "CONTINUE= '        '           /   '&' / Current observing orogram              ",
+                "UND     =                                                                       ",
+                "DBL     =                 1.25                                                  ",
+            ])
+
+            try:
+                with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                    fits.write(None, header=hdr_from_cards)
+
+                rhdr = fitsio.read_header(fname)
+
+            finally:
+                if os.path.exists(fname):
+                    os.remove(fname)
+
+        with warnings.catch_warnings(record=True) as w:
+            fname=tempfile.mktemp(prefix='fitsio-TestCorruptContinue-',suffix='.fits')
+
+            hdr_from_cards=fitsio.FITSHDR([
+                "IVAL    =                   35 / integer value                                  ",
+                "SHORTS  = 'hello world'                                                         ",
+                "PROGRAM = 'Setting the Scale: Determining the Absolute Mass Normalization and &'",
+                "CONTINUE  'Scaling Relations for Clusters at z~0.1&'                            ",
+                "CONTINUE  '&' / Current observing orogram                                       ",
+                "UND     =                                                                       ",
+                "DBL     =                 1.25                                                  ",
+            ])
+
+            try:
+                with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                    fits.write(None, header=hdr_from_cards)
+
+                rhdr = fitsio.read_header(fname)
+
+            finally:
+                if os.path.exists(fname):
+                    os.remove(fname)
+
+
+    def testImageWriteRead(self):
+        """
+        Test a basic image write, data and a header, then reading back in to
+        check the values
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits')
+        dtypes=['u1','i1','u2','i2','<u4','i4','i8','>f4','f8']
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                # note mixing up byte orders a bit
+                for dtype in dtypes:
+                    data = numpy.arange(5*20,dtype=dtype).reshape(5,20)
+                    header={'DTYPE':dtype,'NBYTES':data.dtype.itemsize}
+                    fits.write_image(data, header=header)
+                    rdata = fits[-1].read()
+
+                    self.compare_array(data, rdata, "images")
+
+                    rh = fits[-1].read_header()
+                    self.check_header(header, rh)
+
+            with fitsio.FITS(fname) as fits:
+                for i in xrange(len(dtypes)):
+                    self.assertEqual(fits[i].is_compressed(), False, "not compressed")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testImageWriteEmpty(self):
+        """
+        Test a basic image write, with no data and just a header, then reading
+        back in to check the values
+        """
+        fname=tempfile.mktemp(prefix='fitsio-ImageWriteEmpty-',suffix='.fits')
+        try:
+            data=None
+            header={'EXPTIME':120, 'OBSERVER':'Beatrice Tinsley','INSTRUME':'DECam','FILTER':'r'}
+            with fitsio.FITS(fname,'rw',clobber=True, ignore_empty=True) as fits:
+                for extname in ['CCD1','CCD2','CCD3','CCD4','CCD5','CCD6','CCD7','CCD8']:
+                    # pass extname through so each empty HDU gets its name
+                    fits.write_image(data, header=header, extname=extname)
+                    rdata = fits[-1].read()
+                    rh = fits[-1].read_header()
+                    self.check_header(header, rh)
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testImageWriteReadFromDims(self):
+        """
+        Test creating an image from dims and writing in place
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-ImageWriteFromDims-',suffix='.fits')
+        dtypes=['u1','i1','u2','i2','<u4','i4','i8','>f4','f8']
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                # note mixing up byte orders a bit
+                for dtype in dtypes:
+                    data = numpy.arange(5*20,dtype=dtype).reshape(5,20)
+
+                    fits.create_image_hdu(dims=data.shape,
+                                          dtype=data.dtype)
+
+                    fits[-1].write(data)
+                    rdata = fits[-1].read()
+
+                    self.compare_array(data, rdata, "images")
+
+            with fitsio.FITS(fname) as fits:
+                for i in xrange(len(dtypes)):
+                    self.assertEqual(fits[i].is_compressed(), False, "not compressed")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testImageWriteReadFromDimsChunks(self):
+        """
+        Test creating an image and reading/writing chunks
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-ImageWriteFromDims-',suffix='.fits')
+        dtypes=['u1','i1','u2','i2','<u4','i4','i8','>f4','f8']
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                # note mixing up byte orders a bit
+                for dtype in dtypes:
+                    data = numpy.arange(5*3,dtype=dtype).reshape(5,3)
+
+                    fits.create_image_hdu(dims=data.shape,
+                                          dtype=data.dtype)
+
+                    chunk1 = data[0:2, :]
+                    chunk2 = data[2:, :]
+
+                    #
+                    # first using scalar pixel offset
+                    #
+
+                    fits[-1].write(chunk1)
+
+                    start=chunk1.size
+                    fits[-1].write(chunk2, start=start)
+
+                    rdata = fits[-1].read()
+
+                    self.compare_array(data, rdata, "images")
+
+
+                    #
+                    # now using sequence, easier to calculate
+                    #
+
+                    fits.create_image_hdu(dims=data.shape,
+                                          dtype=data.dtype)
+
+                    # first using pixel offset
+                    fits[-1].write(chunk1)
+
+                    start=[2,0]
+                    fits[-1].write(chunk2, start=start)
+
+                    rdata2 = fits[-1].read()
+
+                    self.compare_array(data, rdata2, "images")
+
+
+            with fitsio.FITS(fname) as fits:
+                for i in xrange(len(dtypes)):
+                    self.assertEqual(fits[i].is_compressed(), False, "not compressed")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def testImageSlice(self):
+        """
+        test reading an image slice
+        """
+        fname=tempfile.mktemp(prefix='fitsio-ImageSlice-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                # note mixing up byte orders a bit
+                for dtype in ['u1','i1','u2','i2','<u4','i4','i8','>f4','f8']:
+                    data = numpy.arange(16*20,dtype=dtype).reshape(16,20)
+                    header={'DTYPE':dtype,'NBYTES':data.dtype.itemsize}
+                    fits.write_image(data, header=header)
+                    rdata = fits[-1][4:12, 9:17]
+
+                    self.compare_array(data[4:12,9:17], rdata, "images")
+
+                    rh = fits[-1].read_header()
+                    self.check_header(header, rh)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testReadFlipAxisSlice(self):
+        """
+        Test reading a slice whose start is greater than its stop,
+        i.e. a reversed (negative-step) slice.
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-ReadFlipAxisSlice-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname, 'rw', clobber=True) as fits:
+                dtype = numpy.int16
+                data = numpy.arange(100 * 200, dtype=dtype).reshape(100, 200)
+                fits.write_image(data)
+                hdu = fits[-1]
+                rdata = hdu[:,130:70]
+
+                # with start > stop and no step given, the slice is read
+                # reversed, matching numpy's step of -1
+                expected_data = data[:,130:70:-1]
+
+                numpy.testing.assert_array_equal(expected_data, rdata,
+                        "Data are not the same (Expected shape: {}, actual shape: {}.".format(
+                            expected_data.shape, rdata.shape))
+
+                rdata = hdu[:,130:70:-6]
+
+                # an explicit negative step should match numpy slicing directly
+                expected_data = data[:,130:70:-6]
+
+                numpy.testing.assert_array_equal(expected_data, rdata,
+                        "Data are not the same (Expected shape: {}, actual shape: {}.".format(
+                            expected_data.shape, rdata.shape))
+
+
+                rdata = hdu[:,90:60:4]  # Positive step integer with start > stop will return an empty array
+                expected_data = numpy.empty(0, dtype=dtype)
+                numpy.testing.assert_array_equal(expected_data, rdata,
+                        "Data are not the same (Expected shape: {}, actual shape: {}.".format(
+                            expected_data.shape, rdata.shape))
+
+                rdata = hdu[:,60:90:-4]  # Negative step integer with start < stop will return an empty array.
+                expected_data = numpy.empty(0, dtype=dtype)
+                numpy.testing.assert_array_equal(expected_data, rdata,
+                        "Data are not the same (Expected shape: {}, actual shape: {}.".format(
+                            expected_data.shape, rdata.shape))
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testImageSliceStriding(self):
+        """
+        test reading an image slice
+        """
+        fname=tempfile.mktemp(prefix='fitsio-ImageSliceStriding-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                # note mixing up byte orders a bit
+                for dtype in ['u1','i1','u2','i2','<u4','i4','i8','>f4','f8']:
+                    data = numpy.arange(16*20,dtype=dtype).reshape(16,20)
+                    header={'DTYPE':dtype,'NBYTES':data.dtype.itemsize}
+                    fits.write_image(data, header=header)
+
+                    rdata = fits[-1][4:16:4, 2:20:2]
+                    expected_data = data[4:16:4, 2:20:2]
+                    self.assertEqual(rdata.shape, expected_data.shape, "Shapes differ with dtype %s" % dtype)
+                    self.compare_array(expected_data, rdata, "images with dtype %s" % dtype)
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testRiceTileCompressedWriteRead(self):
+        """
+        Test writing and reading a rice compressed image
+        """
+        nrows=30
+        ncols=100
+        tile_dims=[5,10]
+        compress='rice'
+        fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz')
+        dtypes = ['u1','i1','u2','i2','u4','i4','f4','f8']
+
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                # note i8 not supported for compressed!
+
+                for dtype in dtypes:
+                    if dtype[0] == 'f':
+                        data = numpy.random.normal(size=nrows*ncols).reshape(nrows,ncols).astype(dtype)
+                    else:
+                        data = numpy.arange(nrows*ncols,dtype=dtype).reshape(nrows,ncols)
+
+                    fits.write_image(data, compress=compress, qlevel=16)
+                    rdata = fits[-1].read()
+
+                    if dtype[0] == 'f':
+                        self.compare_array_abstol(
+                            data,
+                            rdata,
+                            0.2,
+                            "%s compressed images ('%s')" % (compress,dtype),
+                        )
+                    else:
+                        # for integers we have chosen a wide range of values, so
+                        # there will be no quantization and we expect no information
+                        # loss
+                        self.compare_array(data, rdata,
+                                           "%s compressed images ('%s')" % (compress,dtype))
+
+            with fitsio.FITS(fname) as fits:
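+                # HDU 0 is an empty primary image; the compressed images
+                # land in extensions 1 through len(dtypes)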
+                for ii in xrange(len(dtypes)):
+                    i=ii+1
+                    self.assertEqual(fits[i].is_compressed(), True, "is compressed")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testPLIOTileCompressedWriteRead(self):
+        """
+        Test writing and reading a PLIO tile-compressed image
+        """
+
+        compress='plio'
+        fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                dtypes = ['i1','i2','i4','f4','f8']
+
+                for dtype in dtypes:
+
+                    if dtype[0] == 'f':
+                        data = numpy.random.normal(size=5*20).reshape(5,20).astype(dtype).clip(min=0)
+                    else:
+                        data = numpy.arange(5*20, dtype=dtype).reshape(5,20)
+
+                    fits.write_image(data, compress=compress, qlevel=16)
+                    rdata = fits[-1].read()
+
+                    if dtype[0] == 'f':
+                        self.compare_array_abstol(
+                            data,
+                            rdata,
+                            0.2,
+                            "%s compressed images ('%s')" % (compress,dtype),
+                        )
+                    else:
+                        # for integers we have chosen a wide range of values, so
+                        # there will be no quantization and we expect no information
+                        # loss
+                        self.compare_array(data, rdata, "%s compressed images ('%s')" % (compress,dtype))
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testGZIPTileCompressedWriteRead(self):
+        """
+        Test writing and reading gzip compressed image
+        """
+
+        for compress in ['gzip', 'gzip_2']:
+            fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz')
+            try:
+                with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                    dtypes = ['u1','i1','u2','i2','u4','i4','f4','f8']
+
+                    for dtype in dtypes:
+
+                        if dtype[0] == 'f':
+                            data = numpy.random.normal(size=5*20).reshape(5,20).astype(dtype)
+                        else:
+                            data = numpy.arange(5*20, dtype=dtype).reshape(5,20)
+
+                        fits.write_image(data, compress=compress, qlevel=16)
+                        rdata = fits[-1].read()
+
+                        if dtype[0] == 'f':
+                            self.compare_array_abstol(
+                                data,
+                                rdata,
+                                0.2,
+                                "%s compressed images ('%s')" % (compress,dtype),
+                            )
+                        else:
+                            # for integers we have chosen a wide range of values, so
+                            # there will be no quantization and we expect no information
+                            # loss
+                            self.compare_array(data, rdata, "%s compressed images ('%s')" % (compress,dtype))
+
+            finally:
+                if os.path.exists(fname):
+                    os.remove(fname)
+
+    def testGZIPTileCompressedWriteReadLossless(self):
+        """
+        Test lossless (qlevel=None) gzip compressed image write and read
+        """
+
+        for compress in ['gzip', 'gzip_2']:
+            fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz')
+            try:
+                with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                    # note i8 not supported for compressed!
+                    dtypes = ['u1','i1','u2','i2','u4','i4','f4','f8']
+
+                    for dtype in dtypes:
+                        data = numpy.random.normal(size=50*20).reshape(50, 20).astype(dtype)
+                        fits.write_image(data, compress=compress, qlevel=None)
+                        rdata = fits[-1].read()
+
+                        self.compare_array(data, rdata, "%s compressed images ('%s')" % (compress,dtype))
+
+            finally:
+                if os.path.exists(fname):
+                    os.remove(fname)
+
+    def testGZIPTileCompressedReadLosslessAstropy(self):
+        """
+        Test reading an image gzip compressed by astropy (fixed by cfitsio 3.49)
+        """
+        gzip_file = resource_filename(__name__, 'test_images/test_gzip_compressed_image.fits.fz')
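+        # the bundled test image is all zeros, so the data read back should
+        # equal data*0.0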
+        data = fitsio.read(gzip_file)
+        self.compare_array(data, data*0.0, "astropy lossless compressed image")
+
+    def testHCompressTileCompressedWriteRead(self):
+        """
+        Test writing and reading an hcompress tile-compressed image
+        """
+
+        compress='hcompress'
+        fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                dtypes = ['u1','i1','u2','i2','u4','i4','f4','f8']
+
+                for dtype in dtypes:
+
+                    if dtype[0] == 'f':
+                        data = numpy.random.normal(size=5*20).reshape(5,20).astype(dtype)
+                    else:
+                        data = numpy.arange(5*20, dtype=dtype).reshape(5,20)
+
+                    # smoke test on these keywords
+                    fits.write_image(data, compress=compress, qlevel=16,
+                                     hcomp_scale=1, hcomp_smooth=True)
+
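+                    # the HDU written above is only a smoke test; the write
+                    # below is the one that gets read back and verified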
+                    fits.write_image(data, compress=compress, qlevel=16)
+                    rdata = fits[-1].read()
+
+                    if dtype[0] == 'f':
+                        self.compare_array_abstol(
+                            data,
+                            rdata,
+                            0.2,
+                            "%s compressed images ('%s')" % (compress,dtype),
+                        )
+                    else:
+                        # for integers we have chosen a wide range of values, so
+                        # there will be no quantization and we expect no information
+                        # loss
+                        self.compare_array(data, rdata, "%s compressed images ('%s')" % (compress,dtype))
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testCompressPreserveZeros(self):
+        """
+        Test that exact zeros are preserved when writing compressed images
+        """
+
+        zinds = [
+            (1, 3),
+            (2, 9),
+        ]
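+        # (row, col) positions forced to exactly zero; SUBTRACTIVE_DITHER_2 is
+        # designed to preserve exact zeros through lossy quantization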
+        for compress in ['gzip', 'gzip_2', 'rice', 'hcompress']:
+            fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz')
+            try:
+                with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                    dtypes = ['f4','f8']
+
+                    for dtype in dtypes:
+
+                        data = numpy.random.normal(size=5*20).reshape(5,20).astype(dtype)
+                        for zind in zinds:
+                            data[zind[0], zind[1]] = 0.0
+
+                        fits.write_image(
+                            data,
+                            compress=compress,
+                            qlevel=16,
+                            qmethod='SUBTRACTIVE_DITHER_2',
+                        )
+                        rdata = fits[-1].read()
+
+                        for zind in zinds:
+                            assert rdata[zind[0], zind[1]] == 0.0
+
+
+            finally:
+                if os.path.exists(fname):
+                    os.remove(fname)
+
+    def testReadIgnoreScaling(self):
+        """
+        Test the flag to ignore scaling when reading an HDU.
+        """
+        fname = tempfile.mktemp(prefix='fitsio-ReadIgnoreScaling-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                dtype = 'i2'
+                data = numpy.arange(10 * 20, dtype=dtype).reshape(10, 20)
+                header={
+                    'DTYPE': dtype,
+                    'BITPIX': 16,
+                    'NBYTES': data.dtype.itemsize,
+                    'BZERO': 9.33,
+                    'BSCALE': 3.281
+                    }
+
+                fits.write_image(data, header=header)
+                hdu = fits[-1]
+
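+                # the non-integer BZERO/BSCALE make cfitsio scale the data on
+                # read, so it comes back as floats unless ignore_scaling is set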
+                rdata = hdu.read()
+                self.assertEqual(rdata.dtype, numpy.float32, 'Wrong dtype.')
+
+                hdu.ignore_scaling = True
+                rdata = hdu[:,:]
+                self.assertEqual(rdata.dtype, dtype, 'Wrong dtype when ignoring.')
+                numpy.testing.assert_array_equal(data, rdata, err_msg='Wrong unscaled data.')
+
+                rh = fits[-1].read_header()
+                self.check_header(header, rh)
+
+                hdu.ignore_scaling = False
+                rdata = hdu[:,:]
+                self.assertEqual(rdata.dtype, numpy.float32, 'Wrong dtype when not ignoring.')
+                numpy.testing.assert_array_equal(data.astype(numpy.float32), rdata, err_msg='Wrong scaled data returned.')
+        finally:
+            # Clean up the temporary file.  The "with" block above only
+            # guarantees the file is closed, not removed.
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def testWriteKeyDict(self):
+        """
+        test that write_key works using a standard key dict
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-WriteKeyDict-',suffix='.fits')
+        nrows=3
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                im=numpy.zeros( (10,10), dtype='i2' )
+                fits.write(im)
+
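+                # write_key takes name, value and comment arguments, so a
+                # standard key dict can be expanded as write_key(**keydict)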
+                keydict = {
+                    'name':'test',
+                    'value':35,
+                    'comment':'keydict test',
+                }
+                fits[-1].write_key(**keydict)
+
+                h = fits[-1].read_header()
+
+                self.assertEqual(h['test'],keydict['value'])
+                self.assertEqual(h.get_comment('test'),keydict['comment'])
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+
+    def testMoveByName(self):
+        """
+        test moving hdus by name
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-MoveByName-',suffix='.fits')
+        nrows=3
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                data1=numpy.zeros(nrows,dtype=[('ra','f8'),('dec','f8')])
+                data1['ra'] = numpy.random.random(nrows)
+                data1['dec'] = numpy.random.random(nrows)
+                fits.write_table(data1, extname='mytable')
+
+                fits[-1].write_key("EXTVER", 1)
+
+                data2=numpy.zeros(nrows,dtype=[('ra','f8'),('dec','f8')])
+                data2['ra'] = numpy.random.random(nrows)
+                data2['dec'] = numpy.random.random(nrows)
+
+                fits.write_table(data2, extname='mytable')
+                fits[-1].write_key("EXTVER", 2)
+
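+                # movnam_hdu returns the one-based cfitsio HDU number, where
+                # the primary HDU is 1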
+                hdunum1=fits.movnam_hdu('mytable',extver=1)
+                self.assertEqual(hdunum1,2)
+                hdunum2=fits.movnam_hdu('mytable',extver=2)
+                self.assertEqual(hdunum2,3)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testExtVer(self):
+        """
+        Test using extname and extver, all combinations I can think of
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-ExtVer-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                img1=numpy.arange(2*3,dtype='i4').reshape(2,3) + 5
+                img2=numpy.arange(2*3,dtype='i4').reshape(2,3) + 6
+                img3=numpy.arange(2*3,dtype='i4').reshape(2,3) + 7
+
+                nrows=3
+                data1=numpy.zeros(nrows,dtype=[('num','i4'),('ra','f8'),('dec','f8')])
+                data1['num'] = 1
+                data1['ra'] = numpy.random.random(nrows)
+                data1['dec'] = numpy.random.random(nrows)
+
+                data2=numpy.zeros(nrows,dtype=[('num','i4'),('ra','f8'),('dec','f8')])
+                data2['num'] = 2
+                data2['ra'] = numpy.random.random(nrows)
+                data2['dec'] = numpy.random.random(nrows)
+
+                data3=numpy.zeros(nrows,dtype=[('num','i4'),('ra','f8'),('dec','f8')])
+                data3['num'] = 3
+                data3['ra'] = numpy.random.random(nrows)
+                data3['dec'] = numpy.random.random(nrows)
+
+
+                hdr1={'k1':'key1'}
+                hdr2={'k2':'key2'}
+
+                fits.write_image(img1, extname='myimage', header=hdr1, extver=1)
+                fits.write_table(data1)
+                fits.write_table(data2,extname='mytable', extver=1)
+                fits.write_image(img2, extname='myimage', header=hdr2, extver=2)
+                fits.write_table(data3, extname='mytable',extver=2)
+                fits.write_image(img3)
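+                # resulting layout: 0=myimage v1, 1=data1, 2=mytable v1,
+                # 3=myimage v2, 4=mytable v2, 5=img3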
+
+                d1  = fits[1].read()
+                d2  = fits['mytable'].read()
+                d2b = fits['mytable',1].read()
+                d3  = fits['mytable',2].read()
+
+
+                self.compare_rec(data1, d1, "data1")
+                self.compare_rec(data2, d2, "data2")
+                self.compare_rec(data2, d2b, "data2b")
+                self.compare_rec(data3, d3, "data3")
+
+                dimg1  = fits[0].read()
+                dimg1b = fits['myimage',1].read()
+                dimg2  = fits['myimage',2].read()
+                dimg3  = fits[5].read()
+
+                self.compare_array(img1, dimg1,"img1")
+                self.compare_array(img1, dimg1b,"img1b")
+                self.compare_array(img2, dimg2,"img2")
+                self.compare_array(img3, dimg3,"img3")
+
+            rhdr1 = fitsio.read_header(fname, ext='myimage', extver=1)
+            rhdr2 = fitsio.read_header(fname, ext='myimage', extver=2)
+            self.assertTrue('k1' in rhdr1,'testing k1 in header version 1')
+            self.assertTrue('k2' in rhdr2,'testing k2 in header version 2')
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testVariableLengthColumns(self):
+        """
+        Write and read variable length columns
+        """
+
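+        # vstorage controls how variable-length columns come back: 'fixed'
+        # pads each cell to the longest entry, 'object' gives object arrays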
+        for vstorage in ['fixed','object']:
+            fname=tempfile.mktemp(prefix='fitsio-VarCol-',suffix='.fits')
+            try:
+                with fitsio.FITS(fname,'rw',clobber=True,vstorage=vstorage) as fits:
+                    fits.write(self.vardata)
+
+
+                    # reading multiple columns
+                    d = fits[1].read()
+                    self.compare_rec_with_var(self.vardata,d,"read all test '%s'" % vstorage)
+
+                    cols=['u2scalar','Sobj']
+                    d = fits[1].read(columns=cols)
+                    self.compare_rec_with_var(self.vardata,d,"read all test subcols '%s'" % vstorage)
+
+                    # one at a time
+                    for f in self.vardata.dtype.names:
+                        d = fits[1].read_column(f)
+                        if fitsio.util.is_object(self.vardata[f]):
+                            self.compare_object_array(self.vardata[f], d,
+                                                      "read all field '%s'" % f)
+
+                    # same as above with slices
+                    # reading multiple columns
+                    d = fits[1][:]
+                    self.compare_rec_with_var(self.vardata,d,"read all test '%s'" % vstorage)
+
+                    d = fits[1][cols][:]
+                    self.compare_rec_with_var(self.vardata,d,"read all test subcols '%s'" % vstorage)
+
+                    # one at a time
+                    for f in self.vardata.dtype.names:
+                        d = fits[1][f][:]
+                        if fitsio.util.is_object(self.vardata[f]):
+                            self.compare_object_array(self.vardata[f], d,
+                                                      "read all field '%s'" % f)
+
+
+
+                    #
+                    # now same with sub rows
+                    #
+
+                    # reading multiple columns
+                    rows = numpy.array([0,2])
+                    d = fits[1].read(rows=rows)
+                    self.compare_rec_with_var(self.vardata,d,"read subrows test '%s'" % vstorage,
+                                              rows=rows)
+
+                    d = fits[1].read(columns=cols, rows=rows)
+                    self.compare_rec_with_var(self.vardata,d,"read subrows test subcols '%s'" % vstorage,
+                                              rows=rows)
+
+                    # one at a time
+                    for f in self.vardata.dtype.names:
+                        d = fits[1].read_column(f,rows=rows)
+                        if fitsio.util.is_object(self.vardata[f]):
+                            self.compare_object_array(self.vardata[f], d,
+                                                      "read subrows field '%s'" % f,
+                                                      rows=rows)
+
+                    # same as above with slices
+                    # reading multiple columns
+                    d = fits[1][rows]
+                    self.compare_rec_with_var(self.vardata,d,"read subrows slice test '%s'" % vstorage,
+                                              rows=rows)
+                    d = fits[1][2:4]
+                    self.compare_rec_with_var(self.vardata,d,"read slice test '%s'" % vstorage,
+                                              rows=numpy.array([2,3]))
+
+                    d = fits[1][cols][rows]
+                    self.compare_rec_with_var(self.vardata,d,"read subcols subrows slice test '%s'" % vstorage,
+                                              rows=rows)
+                    d = fits[1][cols][2:4]
+                    self.compare_rec_with_var(self.vardata,d,"read subcols slice test '%s'" % vstorage,
+                                              rows=numpy.array([2,3]))
+
+                    # one at a time
+                    for f in self.vardata.dtype.names:
+                        d = fits[1][f][rows]
+                        if fitsio.util.is_object(self.vardata[f]):
+                            self.compare_object_array(self.vardata[f], d,
+                                                      "read subrows field '%s'" % f,
+                                                      rows=rows)
+                        d = fits[1][f][2:4]
+                        if fitsio.util.is_object(self.vardata[f]):
+                            self.compare_object_array(self.vardata[f], d,
+                                                      "read slice field '%s'" % f,
+                                                      rows=numpy.array([2,3]))
+
+
+
+
+            finally:
+                if os.path.exists(fname):
+                    os.remove(fname)
+
+
+    def testTableWriteRead(self):
+        """
+        Test a basic table write, data and a header, then reading back in to
+        check the values
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableWrite-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                try:
+                    fits.write_table(self.data, header=self.keys, extname='mytable')
+                    write_success=True
+                except Exception:
+                    write_success=False
+
+                self.assertTrue(write_success,"testing write does not raise an error")
+                if not write_success:
+                    self.skipTest("cannot test result if write failed")
+
+                d = fits[1].read()
+                self.compare_rec(self.data, d, "table read/write")
+
+                h = fits[1].read_header()
+                self.compare_headerlist_header(self.keys, h)
+
+            # see if our convenience functions are working
+            fitsio.write(fname, self.data2,
+                         extname="newext",
+                         header={'ra':335.2,'dec':-25.2})
+            d = fitsio.read(fname, ext='newext')
+            self.compare_rec(self.data2, d, "table data2")
+            # now test read_column
+            with fitsio.FITS(fname) as fits:
+
+                for f in self.data.dtype.names:
+                    d = fits[1].read_column(f)
+                    self.compare_array(self.data[f], d, "table 1 single field read '%s'" % f)
+
+                for f in self.data2.dtype.names:
+                    d = fits['newext'].read_column(f)
+                    self.compare_array(self.data2[f], d, "table 2 single field read '%s'" % f)
+
+                # now list of columns
+                for cols in [['u2scalar','f4vec','Sarr'],
+                             ['f8scalar','u2arr','Sscalar']]:
+                    d = fits[1].read(columns=cols)
+                    for f in d.dtype.names:
+                        self.compare_array(self.data[f][:], d[f], "test column list %s" % f)
+
+
+                    rows = [1,3]
+                    d = fits[1].read(columns=cols, rows=rows)
+                    for f in d.dtype.names:
+                        self.compare_array(self.data[f][rows], d[f], "test column list %s row subset" % f)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableColumnIndexScalar(self):
+        """
+        Test that indexing a single element of a table column returns a
+        scalar (0-dim) result
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableWrite-',suffix='.fits')
+
+        with fitsio.FITS(fname,'rw',clobber=True) as fits:
+            data = numpy.empty(1, dtype=[('Z', 'f8')])
+            data['Z'][:] = 1.0
+            fits.write_table(data)
+            fits.write_table(data)
+        try:
+            with fitsio.FITS(fname,'r') as fits:
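+                # indexing a single element should mimic numpy and return a
+                # 0-dim (scalar) result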
+                assert fits[1]['Z'][0].ndim == 0
+                assert fits[1][0].ndim == 0
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableReadEmptyRows(self):
+        """
+        test reading an empty list of rows from a table.
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableWrite-',suffix='.fits')
+
+        with fitsio.FITS(fname,'rw',clobber=True) as fits:
+            data = numpy.empty(1, dtype=[('Z', 'f8')])
+            data['Z'][:] = 1.0
+            fits.write_table(data)
+            fits.write_table(data)
+        try:
+            with fitsio.FITS(fname,'r') as fits:
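+                # an empty row selection, in any form, should yield a
+                # zero-length result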
+                assert len(fits[1].read(rows=[])) == 0
+                assert len(fits[1].read(rows=range(0, 0))) == 0
+                assert len(fits[1].read(rows=numpy.arange(0, 0))) == 0
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableFormatColumnSubset(self):
+        """
+        Test reading a subset of columns where one column name is a prefix of
+        another
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableWrite-',suffix='.fits')
+
+        with fitsio.FITS(fname,'rw',clobber=True) as fits:
+            data = numpy.empty(1, dtype=[('Z', 'f8'), ('Z_PERSON', 'f8')])
+            data['Z'][:] = 1.0
+            data['Z_PERSON'][:] = 1.0
+            fits.write_table(data)
+            fits.write_table(data)
+            fits.write_table(data)
+        try:
+            with fitsio.FITS(fname,'r') as fits:
+                # the single-column repr should have exactly one line fewer
+                # than the two-column repr, i.e. 'Z' must not leak into the
+                # 'Z_PERSON' read
+                sz = str(fits[2]['Z_PERSON']).split('\n')
+                s  = str(fits[2][('Z_PERSON', 'Z')]).split('\n')
+                assert len(sz) == len(s) - 1
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableWriteDictOfArraysScratch(self):
+        """
+        This version creates the table directly from a dict of arrays,
+        without creating the table HDU first
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableDict-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                try:
+                    d={}
+                    for n in self.data.dtype.names:
+                        d[n] = self.data[n]
+
+                    fits.write(d)
+                    write_success=True
+                except Exception:
+                    write_success=False
+
+                self.assertTrue(write_success,"write should not raise an error")
+                if not write_success:
+                    self.skipTest("cannot test result if write failed")
+
+            d = fitsio.read(fname)
+            self.compare_rec(self.data, d, "list of dicts, scratch")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableWriteDictOfArrays(self):
+        """
+        This version creates the table HDU first, then writes a dict of arrays
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableDict-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                try:
+                    fits.create_table_hdu(self.data, extname='mytable')
+
+                    d={}
+                    for n in self.data.dtype.names:
+                        d[n] = self.data[n]
+
+                    fits[-1].write(d)
+                    write_success=True
+                except Exception:
+                    write_success=False
+
+                self.assertTrue(write_success,"write should not raise an error")
+                if not write_success:
+                    self.skipTest("cannot test result if write failed")
+
+            d = fitsio.read(fname)
+            self.compare_rec(self.data, d, "list of dicts")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def testTableWriteDictOfArraysVar(self):
+        """
+        This version creates the table from a dict of arrays with variable
+        length columns
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableDictVar-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                try:
+                    d={}
+                    for n in self.vardata.dtype.names:
+                        d[n] = self.vardata[n]
+
+                    fits.write(d)
+                    write_success=True
+                except Exception:
+                    write_success=False
+
+                self.assertTrue(write_success,"write should not raise an error")
+                if not write_success:
+                    self.skipTest("cannot test result if write failed")
+
+            d = fitsio.read(fname)
+            self.compare_rec_with_var(self.vardata,d,"dict of arrays, var")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def testTableWriteListOfArraysScratch(self):
+        """
+        This version creates the table directly from the names and a list of
+        arrays, without creating the table HDU first
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableListScratch-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                try:
+                    names = [n for n in self.data.dtype.names]
+                    dlist = [self.data[n] for n in self.data.dtype.names]
+                    fits.write(dlist, names=names)
+                    write_success=True
+                except Exception:
+                    write_success=False
+
+                self.assertTrue(write_success,"write should not raise an error")
+                if not write_success:
+                    self.skipTest("cannot test result if write failed")
+
+            d = fitsio.read(fname)
+            self.compare_rec(self.data, d, "list of arrays, scratch")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+
+    def testTableWriteListOfArrays(self):
+        """
+        This version creates the table HDU first, then writes a list of
+        arrays with the column names
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableWriteList-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                try:
+                    fits.create_table_hdu(self.data, extname='mytable')
+
+                    columns = [n for n in self.data.dtype.names]
+                    dlist = [self.data[n] for n in self.data.dtype.names]
+                    fits[-1].write(dlist, columns=columns)
+                    write_success=True
+                except Exception:
+                    write_success=False
+
+                self.assertTrue(write_success,"write should not raise an error")
+                if not write_success:
+                    self.skipTest("cannot test result if write failed")
+
+            d = fitsio.read(fname, ext='mytable')
+            self.compare_rec(self.data, d, "list of arrays")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableWriteListOfArraysVar(self):
+        """
+        This version creates the table from the names and a list of arrays
+        with variable length columns
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableListScratch-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                try:
+                    names = [n for n in self.vardata.dtype.names]
+                    dlist = [self.vardata[n] for n in self.vardata.dtype.names]
+                    fits.write(dlist, names=names)
+                    write_success=True
+                except Exception:
+                    write_success=False
+
+                self.assertTrue(write_success,"write should not raise an error")
+                if not write_success:
+                    self.skipTest("cannot test result if write failed")
+
+            d = fitsio.read(fname)
+            self.compare_rec_with_var(self.vardata,d,"list of arrays, var")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableWriteBadString(self):
+        """
+        Test that writing a zero-length string column raises an error
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableWriteBadString-',suffix='.fits')
+
+        try:
+            for d in ['S0','U0']:
+                dt=[('s',d)]
+
+                # old numpy versions don't allow zero-sized string dtypes and
+                # throw a TypeError
+                try:
+                    data = numpy.zeros(1, dtype=dt)
+                    supported = True
+                except TypeError:
+                    supported = False
+
+                if supported:
+                    with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                        try:
+                            fits.write(data)
+                            got_error=False
+                        except ValueError:
+                            got_error=True
+
+                        self.assertTrue(got_error,
+                                        "expected an error for zero-sized string")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableIter(self):
+        """
+        Test iterating over rows of a table
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableIter-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                try:
+                    fits.write_table(self.data, header=self.keys, extname='mytable')
+                    write_success=True
+                except Exception:
+                    write_success=False
+
+                self.assertTrue(write_success,"testing write does not raise an error")
+                if not write_success:
+                    self.skipTest("cannot test result if write failed")
+
+            # one row at a time
+            with fitsio.FITS(fname) as fits:
+                hdu = fits["mytable"]
+                i=0
+                for row_data in hdu:
+                    self.compare_rec(self.data[i], row_data, "table data")
+                    i+=1
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testAsciiTableWriteRead(self):
+        """
+        Test write and read for an ascii table
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-AsciiTableWrite-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                fits.write_table(self.ascii_data, table_type='ascii', header=self.keys, extname='mytable')
+
+                # cfitsio always reports ascii table column types as i4 and f8,
+                # even if written with higher precision.  Need to fix that somehow
+                for f in self.ascii_data.dtype.names:
+                    d = fits[1].read_column(f)
+                    if d.dtype == numpy.float64:
+                        # note we should be able to do 1.11e-16 in principle, but in practice
+                        # we get more like 2.15e-16
+                        self.compare_array_tol(self.ascii_data[f], d, 2.15e-16, "table field read '%s'" % f)
+                    else:
+                        self.compare_array(self.ascii_data[f], d, "table field read '%s'" % f)
+
+                rows = [1,3]
+                for f in self.ascii_data.dtype.names:
+                    d = fits[1].read_column(f,rows=rows)
+                    if d.dtype == numpy.float64:
+                        self.compare_array_tol(self.ascii_data[f][rows], d, 2.15e-16,
+                                               "table field read subrows '%s'" % f)
+                    else:
+                        self.compare_array(self.ascii_data[f][rows], d,
+                                           "table field read subrows '%s'" % f)
+
+                beg=1
+                end=3
+                for f in self.ascii_data.dtype.names:
+                    d = fits[1][f][beg:end]
+                    if d.dtype == numpy.float64:
+                        self.compare_array_tol(self.ascii_data[f][beg:end], d, 2.15e-16,
+                                               "table field read slice '%s'" % f)
+                    else:
+                        self.compare_array(self.ascii_data[f][beg:end], d,
+                                           "table field read slice '%s'" % f)
+
+                cols = ['i2scalar','f4scalar']
+                data = fits[1].read(columns=cols)
+                for f in data.dtype.names:
+                    d=data[f]
+                    if d.dtype == numpy.float64:
+                        self.compare_array_tol(self.ascii_data[f], d, 2.15e-16, "table subcol, '%s'" % f)
+                    else:
+                        self.compare_array(self.ascii_data[f], d, "table subcol, '%s'" % f)
+
+                data = fits[1][cols][:]
+                for f in data.dtype.names:
+                    d=data[f]
+                    if d.dtype == numpy.float64:
+                        self.compare_array_tol(self.ascii_data[f], d, 2.15e-16, "table subcol, '%s'" % f)
+                    else:
+                        self.compare_array(self.ascii_data[f], d, "table subcol, '%s'" % f)
+
+                rows=[1,3]
+                data = fits[1].read(columns=cols,rows=rows)
+                for f in data.dtype.names:
+                    d=data[f]
+                    if d.dtype == numpy.float64:
+                        self.compare_array_tol(self.ascii_data[f][rows], d, 2.15e-16,
+                                               "table subcol, '%s'" % f)
+                    else:
+                        self.compare_array(self.ascii_data[f][rows], d,
+                                           "table subcol, '%s'" % f)
+
+                data = fits[1][cols][rows]
+                for f in data.dtype.names:
+                    d=data[f]
+                    if d.dtype == numpy.float64:
+                        self.compare_array_tol(self.ascii_data[f][rows], d, 2.15e-16,
+                                               "table subcol/row, '%s'" % f)
+                    else:
+                        self.compare_array(self.ascii_data[f][rows], d,
+                                           "table subcol/row, '%s'" % f)
+
+                data = fits[1][cols][beg:end]
+                for f in data.dtype.names:
+                    d=data[f]
+                    if d.dtype == numpy.float64:
+                        self.compare_array_tol(self.ascii_data[f][beg:end], d, 2.15e-16,
+                                               "table subcol/slice, '%s'" % f)
+                    else:
+                        self.compare_array(self.ascii_data[f][beg:end], d,
+                                           "table subcol/slice, '%s'" % f)
+
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def testTableInsertColumn(self):
+        """
+        Insert a new column
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableInsertColumn-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                fits.write_table(self.data, header=self.keys, extname='mytable')
+
+                d = fits[1].read()
+
+                for n in d.dtype.names:
+                    newname = n+'_insert'
+
+                    fits[1].insert_column(newname, d[n])
+
+                    newdata = fits[1][newname][:]
+
+                    self.compare_array(d[n], newdata, "table single field insert and read '%s'" % n)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableDeleteRowRange(self):
+        """
+        Test deleting a range of rows using the delete_rows method
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableDeleteRowRange-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write_table(self.data)
+
+            rowslice = slice(1,3)
+            with fitsio.FITS(fname,'rw') as fits:
+                fits[1].delete_rows(rowslice)
+
+            with fitsio.FITS(fname) as fits:
+                d = fits[1].read()
+
+            compare_data = self.data[ [0,3] ]
+            self.compare_rec(compare_data, d, "delete row range")
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableDeleteRows(self):
+        """
+        Test deleting specific set of rows using the delete_rows method
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableDeleteRows-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write_table(self.data)
+
+            rows2delete = [1,3]
+            with fitsio.FITS(fname,'rw') as fits:
+                fits[1].delete_rows(rows2delete)
+
+            with fitsio.FITS(fname) as fits:
+                d = fits[1].read()
+
+            compare_data = self.data[ [0,2] ]
+            self.compare_rec(compare_data, d, "delete rows")
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableResize(self):
+        """
+        Use the resize method to change the size of a table
+
+        default values get filled in and these are tested
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableResize-',suffix='.fits')
+        try:
+
+            #
+            # shrink from back
+            #
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write_table(self.data)
+
+            nrows = 2
+            with fitsio.FITS(fname,'rw') as fits:
+                fits[1].resize(nrows)
+
+            with fitsio.FITS(fname) as fits:
+                d = fits[1].read()
+
+            compare_data = self.data[0:nrows]
+            self.compare_rec(compare_data, d, "shrink from back")
+
+
+            #
+            # shrink from front
+            #
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write_table(self.data)
+
+            with fitsio.FITS(fname,'rw') as fits:
+                fits[1].resize(nrows, front=True)
+
+            with fitsio.FITS(fname) as fits:
+                d = fits[1].read()
+
+            compare_data = self.data[nrows-self.data.size:]
+            self.compare_rec(compare_data, d, "shrink from front")
+
+
+            # new rows do not come back as simple zeros:
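+            # FITS has no signed byte or unsigned 16/32-bit integer types;
+            # they are stored with BZERO/TZERO offsets, so zero-filled raw
+            # data reads back as -128, 32768 and 2147483648 respectively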
+
+            nrows = 10
+            add_data = numpy.zeros(nrows-self.data.size,dtype=self.data.dtype)
+            add_data['i1scalar'] = -128
+            add_data['i1vec'] = -128
+            add_data['i1arr'] = -128
+            add_data['u2scalar'] = 32768
+            add_data['u2vec'] = 32768
+            add_data['u2arr'] = 32768
+            add_data['u4scalar'] = 2147483648
+            add_data['u4vec'] = 2147483648
+            add_data['u4arr'] = 2147483648
+
+
+            #
+            # expand at the back
+            #
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write_table(self.data)
+            with fitsio.FITS(fname,'rw') as fits:
+                fits[1].resize(nrows)
+
+            with fitsio.FITS(fname) as fits:
+                d = fits[1].read()
+
+            compare_data = numpy.hstack( (self.data, add_data) )
+            self.compare_rec(compare_data, d, "expand at the back")
+
+            #
+            # expand at the front
+            #
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write_table(self.data)
+            with fitsio.FITS(fname,'rw') as fits:
+                fits[1].resize(nrows, front=True)
+
+            with fitsio.FITS(fname) as fits:
+                d = fits[1].read()
+
+            compare_data = numpy.hstack( (add_data, self.data) )
+            self.compare_rec(compare_data, d, "expand at the front")
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+
+    def testSlice(self):
+        """
+        Test reading by slice
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableAppend-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                # initial write
+                fits.write_table(self.data)
+
+                # test reading single columns
+                for f in self.data.dtype.names:
+                    d = fits[1][f][:]
+                    self.compare_array(self.data[f], d, "test read all rows %s column subset" % f)
+
+                # test reading row subsets
+                rows = [1,3]
+                for f in self.data.dtype.names:
+                    d = fits[1][f][rows]
+                    self.compare_array(self.data[f][rows], d, "test %s row subset" % f)
+                for f in self.data.dtype.names:
+                    d = fits[1][f][1:3]
+                    self.compare_array(self.data[f][1:3], d, "test %s row slice" % f)
+                for f in self.data.dtype.names:
+                    d = fits[1][f][1:4:2]
+                    self.compare_array(self.data[f][1:4:2], d, "test %s row slice with step" % f)
+                for f in self.data.dtype.names:
+                    d = fits[1][f][::2]
+                    self.compare_array(self.data[f][::2], d, "test %s row slice with only a step" % f)
+
+                # now list of columns
+                cols=['u2scalar','f4vec','Sarr']
+                d = fits[1][cols][:]
+                for f in d.dtype.names:
+                    self.compare_array(self.data[f][:], d[f], "test column list %s" % f)
+
+
+                cols=['u2scalar','f4vec','Sarr']
+                d = fits[1][cols][rows]
+                for f in d.dtype.names:
+                    self.compare_array(self.data[f][rows], d[f], "test column list %s row subset" % f)
+
+                cols=['u2scalar','f4vec','Sarr']
+                d = fits[1][cols][1:3]
+                for f in d.dtype.names:
+                    self.compare_array(self.data[f][1:3], d[f], "test column list %s row slice" % f)
+
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+
+
+    def testTableAppend(self):
+        """
+        Test creating a table and appending new rows.
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableAppend-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                # initial write
+                fits.write_table(self.data, header=self.keys, extname='mytable')
+                # now append
+                data2 = self.data.copy()
+                data2['f4scalar'] = 3
+                fits[1].append(data2)
+
+                d = fits[1].read()
+                self.assertEqual(d.size, self.data.size*2)
+
+                self.compare_rec(self.data, d[0:self.data.size], "Comparing initial write")
+                self.compare_rec(data2, d[self.data.size:], "Comparing appended data")
+
+                h = fits[1].read_header()
+                self.compare_headerlist_header(self.keys, h)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableSubsets(self):
+        """
+        testing reading subsets
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableWrite-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                fits.write_table(self.data, header=self.keys, extname='mytable')
+
+
+                rows = [1,3]
+                d = fits[1].read(rows=rows)
+                self.compare_rec_subrows(self.data, d, rows, "table subset")
+                columns = ['i1scalar','f4arr']
+                d = fits[1].read(columns=columns, rows=rows)
+
+                for f in columns:
+                    d = fits[1].read_column(f,rows=rows)
+                    self.compare_array(self.data[f][rows], d, "row subset, multi-column '%s'" % f)
+                for f in self.data.dtype.names:
+                    d = fits[1].read_column(f,rows=rows)
+                    self.compare_array(self.data[f][rows], d, "row subset, column '%s'" % f)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+
+    def testGZWriteRead(self):
+        """
+        Test a basic table write to a .fits.gz file, data and a header, then
+        reading back in to check the values
+
+        also check that the compressed data is actually flushed to disk
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-GZTableWrite-',suffix='.fits.gz')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                fits.write_table(self.data, header=self.keys, extname='mytable')
+
+                d = fits[1].read()
+                self.compare_rec(self.data, d, "gzip write/read")
+
+                h = fits[1].read_header()
+                for entry in self.keys:
+                    name=entry['name'].upper()
+                    value=entry['value']
+                    hvalue = h[name]
+                    if isinstance(hvalue,str):
+                        hvalue = hvalue.strip()
+                    self.assertEqual(value,hvalue,"testing header key '%s'" % name)
+
+                    if 'comment' in entry:
+                        self.assertEqual(entry['comment'].strip(),
+                                         h.get_comment(name).strip(),
+                                         "testing comment for header key '%s'" % name)
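+            # closing the FITS object must flush the gzip stream; a zero-size
+            # file would mean the compressed data was never written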
+            stat=os.stat(fname)
+            self.assertNotEqual(stat.st_size, 0, "Making sure the data was flushed to disk")
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testBz2Read(self):
+        '''
+        Write a normal .fits file, run bzip2 on it, then read the bz2
+        file and verify that it's the same as what we put in; we don't
+        currently support or test *writing* bzip2.
+        '''
+
+        if 'SKIP_BZIP_TEST' in os.environ:
+            if sys.version_info >= (2,7,0):
+                self.skipTest("skipping bzip tests")
+            else:
+                # skipTest only works for python 2.7+
+                # just return
+                return
+
+        fname=tempfile.mktemp(prefix='fitsio-BZ2TableWrite-',suffix='.fits')
+        bzfname = fname + '.bz2'
+
+        try:
+            fits = fitsio.FITS(fname,'rw',clobber=True)
+            fits.write_table(self.data, header=self.keys, extname='mytable')
+            fits.close()
+
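+            # compress in place; bzip2 replaces fname with fname.bz2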
+            os.system('bzip2 %s' % fname)
+            f2 = fitsio.FITS(bzfname)
+            d = f2[1].read()
+            self.compare_rec(self.data, d, "bzip2 read")
+
+            h = f2[1].read_header()
+            for entry in self.keys:
+                name=entry['name'].upper()
+                value=entry['value']
+                hvalue = h[name]
+                if isinstance(hvalue,str):
+                    hvalue = hvalue.strip()
+                self.assertEqual(value,hvalue,"testing header key '%s'" % name)
+                if 'comment' in entry:
+                    self.assertEqual(entry['comment'].strip(),
+                                     h.get_comment(name).strip(),
+                                     "testing comment for header key '%s'" % name)
+        except Exception:
+            import traceback
+            traceback.print_exc()
+            self.assertTrue(False, 'Exception in testing bzip2 reading')
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+            if os.path.exists(bzfname):
+                os.remove(bzfname)
+
+    def testChecksum(self):
+        """
+        test that checksumming works
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-Checksum-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                fits.write_table(self.data, header=self.keys, extname='mytable')
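+                # write_checksum stores DATASUM/CHECKSUM keywords;
+                # verify_checksum recomputes and checks them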
+                fits[1].write_checksum()
+                fits[1].verify_checksum()
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTrimStrings(self):
+        """
+        test mode where we trim strings on read
+        """
+        fname=tempfile.mktemp(prefix='fitsio-Trim-',suffix='.fits')
+        dt=[('fval','f8'),('name','S15'),('vec','f4',2)]
+        n=3
+        data=numpy.zeros(n, dtype=dt)
+        data['fval'] = numpy.random.random(n)
+        data['vec'] = numpy.random.random(n*2).reshape(n,2)
+
+        data['name'] = ['mike','really_long_name_to_fill','jan']
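+        # stored strings are padded to the 15-character column width, so
+        # reads may return trailing padding unless trimming is requested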
+
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write(data)
+
+            for onconstruct in [True,False]:
+                if onconstruct:
+                    ctrim=True
+                    otrim=False
+                else:
+                    ctrim=False
+                    otrim=True
+
+                with fitsio.FITS(fname,'rw', trim_strings=ctrim) as fits:
+
+                    if ctrim:
+                        dread=fits[1][:]
+                        self.compare_rec(
+                            data,
+                            dread,
+                            "trimmed strings constructor",
+                        )
+
+                        dname=fits[1]['name'][:]
+                        self.compare_array(
+                            data['name'],
+                            dname,
+                            "trimmed strings col read, constructor",
+                        )
+                        dread=fits[1][ ['name'] ][:]
+                        self.compare_array(
+                            data['name'],
+                            dread['name'],
+                            "trimmed strings col read, constructor",
+                        )
+
+
+
+                    dread=fits[1].read(trim_strings=otrim)
+                    self.compare_rec(
+                        data,
+                        dread,
+                        "trimmed strings keyword",
+                    )
+                    dname=fits[1].read(columns='name', trim_strings=otrim)
+                    self.compare_array(
+                        data['name'],
+                        dname,
+                        "trimmed strings col keyword",
+                    )
+                    dread=fits[1].read(columns=['name'], trim_strings=otrim)
+                    self.compare_array(
+                        data['name'],
+                        dread['name'],
+                        "trimmed strings col keyword",
+                    )
+
+
+
+            # convenience function
+            dread=fitsio.read(fname, trim_strings=True)
+            self.compare_rec(
+                data,
+                dread,
+                "trimmed strings convenience function",
+            )
+            dname=fitsio.read(fname, columns='name', trim_strings=True)
+            self.compare_array(
+                data['name'],
+                dname,
+                "trimmed strings col convenience function",
+            )
+            dread=fitsio.read(fname, columns=['name'], trim_strings=True)
+            self.compare_array(
+                data['name'],
+                dread['name'],
+                "trimmed strings col convenience function",
+            )
+
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def testLowerUpper(self):
+        """
+        test forcing names to upper and lower
+        """
+        fname=tempfile.mktemp(prefix='fitsio-LowerUpper-',suffix='.fits')
+        dt=[('MyName','f8'),('StuffThings','i4'),('Blah','f4')]
+        data=numpy.zeros(3, dtype=dt)
+        data['MyName'] = numpy.random.random(data.size)
+        data['StuffThings'] = numpy.random.random(data.size)
+        data['Blah'] = numpy.random.random(data.size)
+
+        lnames = [n.lower() for n in data.dtype.names]
+        unames = [n.upper() for n in data.dtype.names]
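+        # lower/upper force all returned field names to lower or upper case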
+
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write(data)
+
+            for i in [1,2]:
+                if i == 1:
+                    lower=True
+                    upper=False
+                else:
+                    lower=False
+                    upper=True
+
+                with fitsio.FITS(fname,'rw', lower=lower, upper=upper) as fits:
+                    for rows in [None, [1,2]]:
+
+                        d=fits[1].read(rows=rows)
+                        self.compare_names(d.dtype.names,data.dtype.names,
+                                           lower=lower,upper=upper)
+
+
+                        d=fits[1].read(rows=rows, columns=['MyName','stuffthings'])
+                        self.compare_names(d.dtype.names,data.dtype.names[0:2],
+                                           lower=lower,upper=upper)
+
+                        d = fits[1][1:2]
+                        self.compare_names(d.dtype.names,data.dtype.names,
+                                           lower=lower,upper=upper)
+
+                        if rows is not None:
+                            d = fits[1][rows]
+                        else:
+                            d = fits[1][:]
+                        self.compare_names(d.dtype.names,data.dtype.names,
+                                           lower=lower,upper=upper)
+
+                        if rows is not None:
+                            d = fits[1][['myname','stuffthings']][rows]
+                        else:
+                            d = fits[1][['myname','stuffthings']][:]
+                        self.compare_names(d.dtype.names,data.dtype.names[0:2],
+                                           lower=lower,upper=upper)
+
+                # using overrides
+                with fitsio.FITS(fname,'rw') as fits:
+                    for rows in [None, [1,2]]:
+
+                        d=fits[1].read(rows=rows, lower=lower, upper=upper)
+                        self.compare_names(d.dtype.names,data.dtype.names,
+                                           lower=lower,upper=upper)
+
+
+                        d=fits[1].read(rows=rows, columns=['MyName','stuffthings'],
+                                       lower=lower,upper=upper)
+                        self.compare_names(d.dtype.names,data.dtype.names[0:2],
+                                           lower=lower,upper=upper)
+
+
+
+                for rows in [None, [1,2]]:
+                    d=fitsio.read(fname, rows=rows, lower=lower, upper=upper)
+                    self.compare_names(d.dtype.names,data.dtype.names,
+                                       lower=lower,upper=upper)
+
+                    d=fitsio.read(fname, rows=rows, columns=['MyName','stuffthings'],
+                                  lower=lower, upper=upper)
+                    self.compare_names(d.dtype.names,data.dtype.names[0:2],
+                                       lower=lower,upper=upper)
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testReadRaw(self):
+        """
+        testing reading the file as raw bytes
+        """
+        fname=tempfile.mktemp(prefix='fitsio-readraw-',suffix='.fits')
+
+        dt=[('MyName','f8'),('StuffThings','i4'),('Blah','f4')]
+        data=numpy.zeros(3, dtype=dt)
+        data['MyName'] = numpy.random.random(data.size)
+        data['StuffThings'] = numpy.random.random(data.size)
+        data['Blah'] = numpy.random.random(data.size)
+
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                fits.write(data)
+                raw1 = fits.read_raw()
+
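+            # 'mem://' opens an in-memory file; its raw bytes should match
+            # the on-disk version exactly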
+            with fitsio.FITS('mem://', 'rw') as fits:
+                fits.write(data)
+                raw2 = fits.read_raw()
+
+            f = open(fname, 'rb')
+            raw3 = f.read()
+            f.close()
+
+            self.assertEqual(raw1, raw2)
+            self.assertEqual(raw1, raw3)
+        except Exception:
+            import traceback
+            traceback.print_exc()
+            self.assertTrue(False, 'Exception in testing read_raw')
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableBitcolReadWrite(self):
+        """
+        Test basic write/read with bitcols
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableWriteBitcol-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                try:
+                    fits.write_table(self.bdata, extname='mytable', write_bitcols=True)
+                except Exception:
+                    self.fail("write with bitcols raised an error")
+
+                d=fits[1].read()
+                self.compare_rec(self.bdata, d, "table read/write")
+
+            # now test read_column
+            with fitsio.FITS(fname) as fits:
+
+                for f in self.bdata.dtype.names:
+                    d = fits[1].read_column(f)
+                    self.compare_array(self.bdata[f], d, "table 1 single field read '%s'" % f)
+
+                # now list of columns
+                for cols in [['b1vec','b1arr']]:
+                    d = fits[1].read(columns=cols)
+                    for f in d.dtype.names:
+                        self.compare_array(self.bdata[f][:], d[f], "test column list %s" % f)
+
+                    rows = [1,3]
+                    d = fits[1].read(columns=cols, rows=rows)
+                    for f in d.dtype.names:
+                        self.compare_array(self.bdata[f][rows], d[f], "test column list %s row subset" % f)
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableBitcolAppend(self):
+        """
+        Test creating a table with bitcol support and appending new rows.
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableAppendBitcol-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                # initial write
+                fits.write_table(self.bdata, extname='mytable', write_bitcols=True)
+
+            with fitsio.FITS(fname,'rw') as fits:
+                # now append
+                bdata2 = self.bdata.copy()
+                fits[1].append(bdata2)
+
+                d = fits[1].read()
+                self.assertEqual(d.size, self.bdata.size*2)
+
+                self.compare_rec(self.bdata, d[0:self.bdata.size], "Comparing initial write")
+                self.compare_rec(bdata2, d[self.bdata.size:], "Comparing appended data")
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+    def testTableBitcolInsert(self):
+        """
+        Test creating a table and inserting new bit columns.
+        """
+
+        fname=tempfile.mktemp(prefix='fitsio-TableBitcolInsert-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+                # initial write
+                nrows=3
+                d = numpy.zeros(nrows, dtype=[('ra','f8')])
+                d['ra'] = range(d.size)
+                fits.write(d)
+
+            with fitsio.FITS(fname,'rw') as fits:
+                bcol = numpy.array([True,False,True])
+
+                # now append
+                fits[-1].insert_column('bscalar_inserted', bcol, write_bitcols=True)
+
+                d = fits[-1].read()
+                self.assertEqual(d.size, nrows,'read size equals')
+                self.compare_array(bcol, d['bscalar_inserted'], "inserted bitcol")
+
+                bvec = numpy.array([[True,False], [False,True], [True,True] ])
+
+                # now append
+                fits[-1].insert_column('bvec_inserted', bvec, write_bitcols=True)
+
+                d = fits[-1].read()
+                self.assertEqual(d.size, nrows,'read size equals')
+                self.compare_array(bvec, d['bvec_inserted'], "inserted bitcol")
+
+
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+
+    def _record_exists(self, header_records, key, value):
+        for rec in header_records:
+            if rec['name'] == key and rec['value'] == value:
+                return True
+
+        return False
+
+    def testReadCommentHistory(self):
+        fname=tempfile.mktemp(prefix='fitsio-ReadCommentHistory-',suffix='.fits')
+        try:
+            with fitsio.FITS(fname,'rw',clobber=True) as fits:
+                data = numpy.arange(100).reshape(10, 10)
+                fits.create_image_hdu(data)
+                hdu = fits[-1]
+                hdu.write_comment('A COMMENT 1')
+                hdu.write_comment('A COMMENT 2')
+                hdu.write_history('SOME HISTORY 1')
+                hdu.write_history('SOME HISTORY 2')
+                fits.close()
+
+            with fitsio.FITS(fname, 'r') as fits:
+                hdu = fits[-1]
+                header = hdu.read_header()
+                records = header.records()
+                self.assertTrue(self._record_exists(records, 'COMMENT', 'A COMMENT 1'))
+                self.assertTrue(self._record_exists(records, 'COMMENT', 'A COMMENT 2'))
+                self.assertTrue(self._record_exists(records, 'HISTORY', 'SOME HISTORY 1'))
+                self.assertTrue(self._record_exists(records, 'HISTORY', 'SOME HISTORY 2'))
+
+        finally:
+            if os.path.exists(fname):
+                os.remove(fname)
+
+
+    def compare_names(self, read_names, true_names, lower=False, upper=False):
+        for nread,ntrue in zip(read_names,true_names):
+            if lower:
+                tname = ntrue.lower()
+                mess="lower: '%s' vs '%s'" % (nread,tname)
+            else:
+                tname = ntrue.upper()
+                mess="upper: '%s' vs '%s'" % (nread,tname)
+            self.assertEqual(nread, tname, mess)
+
+    def check_header(self, header, rh):
+        for k in header:
+            v = header[k]
+            rv = rh[k]
+            if isinstance(rv,str):
+                v = v.strip()
+                rv = rv.strip()
+            self.assertEqual(v,rv,"testing equal key '%s'" % k)
+
+
+    def compare_headerlist_header(self, header_list, header):
+        """
+        The first is a list of dicts, second a FITSHDR
+        """
+        for entry in header_list:
+            name=entry['name'].upper()
+            value=entry['value']
+            hvalue = header[name]
+            if isinstance(hvalue,str):
+                hvalue = hvalue.strip()
+            self.assertEqual(value,hvalue,"testing header key '%s'" % name)
+
+            if 'comment' in entry:
+                self.assertEqual(entry['comment'].strip(),
+                                 header.get_comment(name).strip(),
+                                 "testing comment for header key '%s'" % name)
+
+    def _cast_shape(self, shape):
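+        # collapse (n, 1) shapes to (n,) and scalar-like (1,) to ()
+        # so that reshaped reads compare equal to the input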
+        if len(shape) == 2 and shape[1] == 1:
+            return (shape[0],)
+        elif shape == (1,):
+            return tuple()
+        else:
+            return shape
+
+    def compare_array_tol(self, arr1, arr2, tol, name):
+        self.assertEqual(arr1.shape, arr2.shape,
+                         "testing arrays '%s' shapes are equal: "
+                         "input %s, read: %s" % (name, arr1.shape, arr2.shape))
+
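+        # element-wise relative difference, so tol is a fractional tolerance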
+        adiff = numpy.abs( (arr1-arr2)/arr1 )
+        maxdiff = adiff.max()
+        res=numpy.where(adiff  > tol)
+        for i,w in enumerate(res):
+            self.assertEqual(w.size,0,
+                             "testing array '%s' dim %d are "
+                             "equal within tolerance %e, found "
+                             "max diff %e" % (name,i,tol,maxdiff))
+
+    def compare_array_abstol(self, arr1, arr2, tol, name):
+        self.assertEqual(arr1.shape, arr2.shape,
+                         "testing arrays '%s' shapes are equal: "
+                         "input %s, read: %s" % (name, arr1.shape, arr2.shape))
+
+        adiff = numpy.abs(arr1-arr2)
+        maxdiff = adiff.max()
+        res=numpy.where(adiff  > tol)
+        for i,w in enumerate(res):
+            self.assertEqual(w.size,0,
+                             "testing array '%s' dim %d are "
+                             "equal within tolerance %e, found "
+                             "max diff %e" % (name,i,tol,maxdiff))
+
+
+    def compare_array(self, arr1, arr2, name):
+        arr1_shape = self._cast_shape(arr1.shape)
+        arr2_shape = self._cast_shape(arr2.shape)
+
+        self.assertEqual(arr1_shape, arr2_shape,
+                         "testing arrays '%s' shapes are equal: "
+                         "input %s, read: %s" % (name, arr1_shape, arr2_shape))
+
+        if sys.version_info >= (3, 0, 0) and arr1.dtype.char == 'S':
+            _arr1 = arr1.astype('U')
+        else:
+            _arr1 = arr1
+        res=numpy.where(_arr1 != arr2)
+        for i,w in enumerate(res):
+            self.assertEqual(w.size,0,"testing array '%s' dim %d are equal" % (name,i))
+
+    def compare_rec(self, rec1, rec2, name):
+        for f in rec1.dtype.names:
+            rec1_shape = self._cast_shape(rec1[f].shape)
+            rec2_shape = self._cast_shape(rec2[f].shape)
+            self.assertEqual(rec1_shape, rec2_shape,
+                             "testing '%s' field '%s' shapes are equal: "
+                             "input %s, read: %s" % (
+                                name, f, rec1_shape, rec2_shape))
+
+            if sys.version_info >= (3, 0, 0) and rec1[f].dtype.char == 'S':
+                # for python 3, we get back unicode always
+                _rec1f = rec1[f].astype('U')
+            else:
+                _rec1f = rec1[f]
+
+            res=numpy.where(_rec1f != rec2[f])
+            for w in res:
+                self.assertEqual(w.size,0,"testing column %s" % f)
+
+    def compare_rec_subrows(self, rec1, rec2, rows, name):
+        for f in rec1.dtype.names:
+            rec1_shape = self._cast_shape(rec1[f][rows].shape)
+            rec2_shape = self._cast_shape(rec2[f].shape)
+
+            self.assertEqual(rec1_shape, rec2_shape,
+                             "testing '%s' field '%s' shapes are equal: "
+                             "input %s, read: %s" % (
+                                name, f, rec1_shape, rec2_shape))
+
+            if sys.version_info >= (3, 0, 0) and rec1[f].dtype.char == 'S':
+                # for python 3, we get back unicode always
+                _rec1frows = rec1[f][rows].astype('U')
+            else:
+                _rec1frows = rec1[f][rows]
+
+            res=numpy.where(_rec1frows != rec2[f])
+            for w in res:
+                self.assertEqual(w.size,0,"testing column %s" % f)
+
+
+    def compare_rec_with_var(self, rec1, rec2, name, rows=None):
+        """
+
+        First one *must* be the one with object arrays
+
+        Second can have fixed length
+
+        both should be same number of rows
+
+        """
+
+        if rows is None:
+            rows = arange(rec2.size)
+            self.assertEqual(rec1.size,rec2.size,
+                             "testing '%s' same number of rows" % name)
+
+        # rec2 may have fewer fields
+        for f in rec2.dtype.names:
+
+            # f1 will have the objects
+            if fitsio.util.is_object(rec1[f]):
+                self.compare_object_array(rec1[f], rec2[f],
+                                          "testing '%s' field '%s'" % (name,f),
+                                          rows=rows)
+            else:
+                self.compare_array(rec1[f][rows], rec2[f],
+                                   "testing '%s' num field '%s' equal" % (name,f))
+
+    def compare_object_array(self, arr1, arr2, name, rows=None):
+        """
+        The first must be object
+        """
+        if rows is None:
+            rows = arange(arr1.size)
+
+        for i,row in enumerate(rows):
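+            # string elements are compared as text; on py3 the original
+            # value may be bytes and is decoded first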
+            if (sys.version_info >= (3, 0, 0) and isinstance(arr2[i], bytes)) or isinstance(arr2[i], str):
+                if sys.version_info >= (3, 0, 0) and isinstance(arr1[row], bytes):
+                    _arr1row = arr1[row].decode('ascii')
+                else:
+                    _arr1row = arr1[row]
+                self.assertEqual(_arr1row,arr2[i],
+                                "%s str el %d equal" % (name,i))
+            else:
+                delement = arr2[i]
+                orig = arr1[row]
+                s=len(orig)
+                self.compare_array(orig, delement[0:s],
+                                   "%s num el %d equal" % (name,i))
+
+    def compare_rec_with_var_subrows(self, rec1, rec2, name, rows):
+        """
+
+        Second one must be the one with object arrays
+
+        """
+        for f in rec1.dtype.names:
+            if fitsio.util.is_object(rec2[f]):
+
+                for i in range(rec2.size):
+                    if isinstance(rec2[f][i],str):
+                        self.assertEqual(rec1[f][i],rec2[f][i],
+                                        "testing '%s' str field '%s' el %d equal" % (name,f,i))
+                    else:
+                        delement = rec1[f][i]
+                        orig = rec2[f][i]
+                        s=orig.size
+                        self.compare_array(orig, delement[0:s],
+                                           "testing '%s' num field '%s' el %d equal" % (name,f,i))
+            else:
+                self.compare_array(rec1[f], rec2[f],
+                                   "testing '%s' num field '%s' equal" % (name,f))
+
+
+
+
+
+if __name__ == '__main__':
+    test()
diff --git a/fitsio/test_images/test_gzip_compressed_image.fits.fz b/fitsio/test_images/test_gzip_compressed_image.fits.fz
new file mode 100644 (file)
index 0000000..6dae22d
Binary files /dev/null and b/fitsio/test_images/test_gzip_compressed_image.fits.fz differ
diff --git a/fitsio/util.py b/fitsio/util.py
new file mode 100644 (file)
index 0000000..562b609
--- /dev/null
@@ -0,0 +1,155 @@
+"""
+utilities for the fits library
+"""
+import sys
+import numpy
+
+from . import _fitsio_wrap
+
+if sys.version_info >= (3, 0, 0):
+    IS_PY3 = True
+else:
+    IS_PY3 = False
+
+
+class FITSRuntimeWarning(RuntimeWarning):
+    pass
+
+
+def cfitsio_version(asfloat=False):
+    """
+    Return the cfitsio version as a string, or as a float if asfloat=True.
+    """
+    # use string version to avoid roundoffs
+    ver = '%0.3f' % _fitsio_wrap.cfitsio_version()
+    if asfloat:
+        return float(ver)
+    else:
+        return ver
+
+
+if sys.version_info > (3, 0, 0):
+    _itypes = (int,)
+    _stypes = (str, bytes)
+else:
+    _itypes = (int, long)  # noqa - only for py2
+    _stypes = (basestring, unicode,)  # noqa - only for py2
+
+_itypes += (numpy.uint8, numpy.int8,
+            numpy.uint16, numpy.int16,
+            numpy.uint32, numpy.int32,
+            numpy.uint64, numpy.int64)
+
+# numpy string types; added for both py2 and py3
+_stypes += (numpy.string_, numpy.str_)
+
+# for header keywords
+_ftypes = (float, numpy.float32, numpy.float64)
+
+
+def isstring(arg):
+    return isinstance(arg, _stypes)
+
+
+def isinteger(arg):
+    return isinstance(arg, _itypes)
+
+
+def is_object(arr):
+    if arr.dtype.descr[0][1][1] == 'O':
+        return True
+    else:
+        return False
+
+
+def fields_are_object(arr):
+    isobj = numpy.zeros(len(arr.dtype.names), dtype=bool)
+    for i, name in enumerate(arr.dtype.names):
+        if is_object(arr[name]):
+            isobj[i] = True
+    return isobj
+
+
+def is_little_endian(array):
+    """
+    Return True if array is little endian, False otherwise.
+
+    Parameters
+    ----------
+    array: numpy array
+        A numerical python array.
+
+    Returns
+    -------
+    Truth value:
+        True for little-endian
+
+    Notes
+    -----
+    Strings are neither big nor little endian.  The input must be a simple numpy
+    array, not an array with fields.
+    """
+    if numpy.little_endian:
+        machine_little = True
+    else:
+        machine_little = False
+
+    byteorder = array.dtype.base.byteorder
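+    # '<' is explicitly little endian; '=' means native order, which is
+    # little endian exactly when the machine is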
+    return (byteorder == '<') or (machine_little and byteorder == '=')
+
+
+def array_to_native(array, inplace=False):
+    """
+    Convert an array to the native byte order.
+
+    NOTE: the inplace keyword is only honored when a byte swap is actually performed.
+    """
+    if numpy.little_endian:
+        machine_little = True
+    else:
+        machine_little = False
+
+    data_little = False
+    if array.dtype.names is None:
+
+        if array.dtype.base.byteorder == '|':
+            # strings and 1 byte integers
+            return array
+
+        data_little = is_little_endian(array)
+    else:
+        # assume all are same byte order: we only need to find one with
+        # little endian
+        for fname in array.dtype.names:
+            if is_little_endian(array[fname]):
+                data_little = True
+                break
+
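+    # byteswap only when the data byte order differs from the machine's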
+    if ((machine_little and not data_little)
+            or (not machine_little and data_little)):
+        output = array.byteswap(inplace)
+    else:
+        output = array
+
+    return output
+
+
+def array_to_native_c(array_in, inplace=False):
+    # copy only made if not C order
+    arr = numpy.array(array_in, order='C', copy=False)
+    return array_to_native(arr, inplace=inplace)
+
+
+def mks(val):
+    """
+    make sure the value is a string, handling python 2 vs 3 differences
+    """
+    if sys.version_info > (3, 0, 0):
+        if isinstance(val, bytes):
+            sval = str(val, 'utf-8')
+        else:
+            sval = str(val)
+    else:
+        sval = str(val)
+
+    return sval
diff --git a/patches/README.md b/patches/README.md
new file mode 100644 (file)
index 0000000..29730ac
--- /dev/null
@@ -0,0 +1,26 @@
+# Patches for cfitsio
+
+This directory contains patches for the cfitsio build. These patches
+are applied before the library is compiled during the python package
+build step.
+
+The patches were generated with the script `build_cfitsio_patches.py` by
+Matthew Becker in December of 2018.
+
+## Adding New Patches
+
+To add new patches, you need to
+
+1. Make a copy of the file you want to patch.
+2. Modify it.
+3. Run `diff -u old_file new_file` to get a unified-format patch.
+4. Make sure the paths at the top of the patch look like this:
+    ```
+    --- cfitsio<version>/<filename>    2018-03-01 10:28:51.000000000 -0600
+    +++ cfitsio<version>/<filename>    2018-12-14 08:39:20.000000000 -0600
+    ...
+    ``` 
+    where `<version>` and `<filename>` are the current cfitsio version and
+    the file being patched.
+
+5. Commit the patch file in the patches directory with the name `<filename>.patch`.
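+
+Steps 1-3 can also be scripted.  Below is a minimal sketch in python
+(the file names are hypothetical, and you may still need to fix up the
+header paths by hand as described in step 4):
+
+```
+import os
+import shutil
+
+src = 'cfitsio3470/putcols.c'   # hypothetical file to patch
+orig = src + '.orig'
+shutil.copy(src, orig)          # step 1: keep a pristine copy
+
+# step 2: edit `src` in place, then ...
+
+# step 3: diff exits nonzero when the files differ, so the exit
+# status is deliberately ignored here
+os.system('diff -u %s %s > patches/putcols.c.patch' % (orig, src))
+```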
diff --git a/patches/build_cfitsio_patches.py b/patches/build_cfitsio_patches.py
new file mode 100644 (file)
index 0000000..e36b6f0
--- /dev/null
@@ -0,0 +1,35 @@
+import os
+import sys
+import subprocess
+
+VERSION = '3.47'
+SRC_URL = (
+    "https://heasarc.gsfc.nasa.gov/FTP/software/"
+    "fitsio/c/cfitsio-%s.tar.gz" % VERSION)
+SRC_TARBALL = os.path.basename(SRC_URL)
+SRC_DIR = os.path.basename(SRC_URL).replace('.tar.gz', '')
+
+# download
+os.system(
+    'rm -rf %s && rm -f %s && wget %s && tar xzvf %s && ls -alh' % (
+        SRC_DIR, SRC_TARBALL, SRC_URL, SRC_TARBALL))
+
+# diff src files
+# the sources are all at the top level
+os.makedirs('patches', exist_ok=True)
+
+for root, _, files in os.walk(SRC_DIR):
+    print(files)
+    for fname in files:
+        src = os.path.join(SRC_DIR, fname)
+        dst = os.path.join('cfitsio-%spatch' % VERSION, fname)
+        patch = os.path.join('patches', fname + '.patch')
+        os.system('diff -u %s %s > %s' % (src, dst, patch))
+        with open(patch, 'rb') as fp:
+            buff = fp.read()
+        if len(buff) == 0:
+            os.remove(patch)
+    break
+
+# clean up
+os.system('rm -rf %s && rm -f %s' % (SRC_DIR, SRC_TARBALL))
diff --git a/patches/configure.in.patch b/patches/configure.in.patch
new file mode 100644 (file)
index 0000000..20241de
--- /dev/null
@@ -0,0 +1,31 @@
+--- cfitsio3470/configure.in   2018-03-01 10:28:51.000000000 -0600
++++ cfitsio3470/configure.in   2018-12-14 08:39:20.000000000 -0600
+@@ -42,6 +42,13 @@
+   [ if test $enableval = yes; then SSE_FLAGS="$SSE_FLAGS -mssse3"; fi ]
+ )
+
++AC_ARG_ENABLE(
++  standard_strings,
++  [AS_HELP_STRING([--enable-standard-strings],[Enable use of FITSIO standard string processing])],
++  [ if test $enableval = yes; then USE_STANDARD_STRINGS=yes; fi ]
++)
++
++
+ # Define BUILD_HERA when building for HERA project to activate code in
+ # drvrfile.c (by way of fitsio2.h):
+ AC_ARG_ENABLE(
+@@ -582,6 +589,14 @@
+   AC_CHECK_LIB([pthread],[main],[],[AC_MSG_ERROR(Unable to locate pthread library needed when enabling reentrant multithreading)])
+ fi
+
++# ------------------------------------------------------------------------------
++# Define FITS_USE_STANDARD_STRINGS
++# ------------------------------------------------------------------------------
++if test "x$USE_STANDARD_STRINGS" = xyes; then
++  AC_DEFINE(FITS_USE_STANDARD_STRINGS)
++fi
++
++
+ # -------------------------------------------------------------------------
+ # there are some idiosyncrasies with semun defs (used in semxxx). Solaris
+ # does not define it at all
diff --git a/patches/configure.patch b/patches/configure.patch
new file mode 100644 (file)
index 0000000..a6e9e34
--- /dev/null
@@ -0,0 +1,50 @@
+--- cfitsio3470/configure      2018-03-01 10:28:51.000000000 -0600
++++ cfitsio3470/configure      2018-12-14 08:39:20.000000000 -0600
+@@ -708,6 +708,7 @@
+ enable_reentrant
+ enable_sse2
+ enable_ssse3
++enable_standard_strings
+ enable_hera
+ with_gsiftp_flavour
+ with_gsiftp
+@@ -1339,6 +1340,8 @@
+                           instruction set
+   --enable-ssse3          Enable use of instructions in the SSSE3 extended
+                           instruction set
++  --enable-standard-strings
++                          Enable use of FITSIO standard string processing
+   --enable-hera           Build for HERA (ASD use only)
+
+ Optional Packages:
+@@ -2250,6 +2253,14 @@
+ fi
+
+
++# Check whether --enable-standard_strings was given.
++if test "${enable_standard_strings+set}" = set; then :
++  enableval=$enable_standard_strings;  if test $enableval = yes; then USE_STANDARD_STRINGS=yes; fi
++
++fi
++
++
++
+ # Define BUILD_HERA when building for HERA project to activate code in
+ # drvrfile.c (by way of fitsio2.h):
+ # Check whether --enable-hera was given.
+@@ -5072,6 +5083,15 @@
+
+ fi
+
++# ------------------------------------------------------------------------------
++# Define FITS_USE_STANDARD_STRINGS
++# ------------------------------------------------------------------------------
++if test "x$USE_STANDARD_STRINGS" = xyes; then
++  $as_echo "#define FITS_USE_STANDARD_STRINGS 1" >>confdefs.h
++
++fi
++
++
+ # -------------------------------------------------------------------------
+ # there are some idiosyncrasies with semun defs (used in semxxx). Solaris
+ # does not define it at all
diff --git a/patches/drvrnet.c.patch b/patches/drvrnet.c.patch
new file mode 100644 (file)
index 0000000..941a14e
--- /dev/null
@@ -0,0 +1,11 @@
+--- drvrnet.c  2020-03-03 09:04:34.333712777 -0500
++++ drvrnet_new.c      2020-03-03 09:05:00.523143138 -0500
+@@ -1258,7 +1258,7 @@
+          if (urlname)
+          {
+             fprintf(stderr,"Downloading ");
+-            fprintf(stderr,urlname);
++            fprintf(stderr,"%s",urlname);
+             fprintf(stderr,"...\n");
+          }
+          isFirst = 0;
diff --git a/patches/fitscore.c.patch b/patches/fitscore.c.patch
new file mode 100644 (file)
index 0000000..2692a40
--- /dev/null
@@ -0,0 +1,22 @@
+--- cfitsio3470/fitscore.c     2018-03-01 10:28:51.000000000 -0600
++++ cfitsio3470/fitscore.c     2018-12-14 08:39:20.000000000 -0600
+@@ -182,6 +182,19 @@
+
+     return(*version);
+ }
++
++/*
++   Return 1 if we are to treat strings per the FITS standard (not
++   replacing nulls with spaces, and not padding with spaces)
++*/
++int fits_use_standard_strings(void) {
++#ifdef FITS_USE_STANDARD_STRINGS
++    return 1;
++#else
++    return 0;
++#endif
++}
++
+ /*--------------------------------------------------------------------------*/
+ int ffflnm(fitsfile *fptr,    /* I - FITS file pointer  */
+            char *filename,    /* O - name of the file   */
diff --git a/patches/fitsio.h.patch b/patches/fitsio.h.patch
new file mode 100644 (file)
index 0000000..ea383f6
--- /dev/null
@@ -0,0 +1,10 @@
+--- cfitsio3470/fitsio.h       2018-03-01 10:28:51.000000000 -0600
++++ cfitsio3470/fitsio.h       2018-12-14 08:39:20.000000000 -0600
+@@ -797,6 +797,7 @@
+ /*---------------- utility routines -------------*/
+
+ float CFITS_API ffvers(float *version);
++int CFITS_API fits_use_standard_strings(void);
+ void CFITS_API ffupch(char *string);
+ void CFITS_API ffgerr(int status, char *errtext);
+ void CFITS_API ffpmsg(const char *err_message);
diff --git a/patches/putcols.c.patch b/patches/putcols.c.patch
new file mode 100644 (file)
index 0000000..15ab519
--- /dev/null
@@ -0,0 +1,19 @@
+--- cfitsio3470/putcols.c      2018-03-01 10:28:51.000000000 -0600
++++ cfitsio3470/putcols.c      2018-12-14 08:39:20.000000000 -0600
+@@ -158,7 +158,16 @@
+
+          for (;jj < twidth; jj++)    /* fill field with blanks, if needed */
+          {
+-           *buffer = ' ';
++             if ( fits_use_standard_strings() ) {
++                 if (snull[0] == ASCII_NULL_UNDEFINED) {
++                     *buffer = ' ';
++                 } else {
++                     *buffer = '\0';
++                 }
++             } else {
++                 *buffer = ' ';
++             }
++
+            buffer++;
+          }
diff --git a/setup.cfg b/setup.cfg
new file mode 100644 (file)
index 0000000..8bfd5a1
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+
diff --git a/setup.py b/setup.py
new file mode 100644 (file)
index 0000000..afdd162
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,289 @@
+#
+# setup script for fitsio, using setuptools
+#
+# c.f.
+# https://packaging.python.org/guides/distributing-packages-using-setuptools/
+
+from __future__ import print_function
+from setuptools import setup, Extension, find_packages
+from setuptools.command.build_ext import build_ext
+
+import os
+import subprocess
+from subprocess import Popen, PIPE
+import glob
+import shutil
+
+
+class build_ext_subclass(build_ext):
+    boolean_options = build_ext.boolean_options + ['use-system-fitsio']
+
+    user_options = build_ext.user_options + [
+        ('use-system-fitsio', None, "Use the cfitsio installed in the system"),
+        ('system-fitsio-includedir=', None,
+         "Path to look for cfitsio header; default is "
+         "the system search path."),
+        ('system-fitsio-libdir=', None,
+         "Path to look for cfitsio library; default is "
+         "the system search path."),
+    ]
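+
+    # these options become command line flags to build_ext, e.g.
+    # (the paths below are hypothetical):
+    #
+    #   python setup.py build_ext --use-system-fitsio \
+    #       --system-fitsio-libdir=/usr/lib \
+    #       --system-fitsio-includedir=/usr/include
+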
+    cfitsio_version = '3490'
+    cfitsio_dir = 'cfitsio%s' % cfitsio_version
+
+    def initialize_options(self):
+        self.use_system_fitsio = False
+        self.system_fitsio_includedir = None
+        self.system_fitsio_libdir = None
+        build_ext.initialize_options(self)
+
+    def finalize_options(self):
+
+        build_ext.finalize_options(self)
+
+        self.cfitsio_build_dir = os.path.join(
+            self.build_temp, self.cfitsio_dir)
+        self.cfitsio_zlib_dir = os.path.join(
+            self.cfitsio_build_dir, 'zlib')
+        self.cfitsio_patch_dir = os.path.join(
+            self.build_temp, 'patches')
+
+        if self.use_system_fitsio:
+            if self.system_fitsio_includedir:
+                self.include_dirs.insert(0, self.system_fitsio_includedir)
+            if self.system_fitsio_libdir:
+                self.library_dirs.insert(0, self.system_fitsio_libdir)
+        else:
+            # We defer configuration of the bundled cfitsio to build_extensions
+            # because we will know the compiler there.
+            self.include_dirs.insert(0, self.cfitsio_build_dir)
+
+    def run(self):
+        # For extensions that require 'numpy' in their include dirs,
+        # replace 'numpy' with the actual paths
+        import numpy
+        np_include = numpy.get_include()
+
+        for extension in self.extensions:
+            if 'numpy' in extension.include_dirs:
+                idx = extension.include_dirs.index('numpy')
+                extension.include_dirs.insert(idx, np_include)
+                extension.include_dirs.remove('numpy')
+
+        build_ext.run(self)
+
+    def build_extensions(self):
+        if not self.use_system_fitsio:
+
+            # Use the compiler for building python to build cfitsio
+            # for maximized compatibility.
+
+            # there is some issue with non-aligned data when optimization
+            # is set to '-O3' on some versions of gcc; the behavior appears
+            # to differ between gcc 4 and gcc 5
+
+            CCold = self.compiler.compiler
+            CC = []
+            for val in CCold:
+                if val == '-O3':
+                    print("replacing '-O3' with '-O2' to address "
+                          "gcc bug")
+                    val = '-O2'
+                if val == 'ccache':
+                    print("removing ccache from the compiler options")
+                    continue
+
+                CC.append(val)
+
+            self.configure_cfitsio(
+                CC=CC,
+                ARCHIVE=self.compiler.archiver,
+                RANLIB=self.compiler.ranlib,
+            )
+
+            # If configure detected bzlib.h, we have to link to libbz2
+            with open(os.path.join(self.cfitsio_build_dir, 'Makefile')) as fp:
+                _makefile = fp.read()
+                if '-DHAVE_BZIP2=1' in _makefile:
+                    self.compiler.add_library('bz2')
+                if '-DCFITSIO_HAVE_CURL=1' in _makefile:
+                    self.compiler.add_library('curl')
+
+            self.compile_cfitsio()
+
+            # link against the .a library in cfitsio; it should be a
+            # static library of relocatable objects (-fPIC), since we
+            # built it with the python compiler flags
+
+            link_objects = glob.glob(
+                os.path.join(self.cfitsio_build_dir, '*.a'))
+
+            self.compiler.set_link_objects(link_objects)
+
+            # Ultimate hack: append the .a files to the dependency list
+            # so the extension is properly rebuilt if the cfitsio source is updated.
+            for ext in self.extensions:
+                ext.depends += link_objects
+        else:
+            self.compiler.add_library('cfitsio')
+
+            # Check if system cfitsio was compiled with bzip2 and/or curl
+            if self.check_system_cfitsio_objects('bzip2'):
+                self.compiler.add_library('bz2')
+            if self.check_system_cfitsio_objects('curl_'):
+                self.compiler.add_library('curl')
+
+            # Make sure the external lib has the fits_use_standard_strings
+            # function. If not, then define a macro to tell the wrapper
+            # to always return False.
+            if not self.check_system_cfitsio_objects(
+                    '_fits_use_standard_strings'):
+                self.compiler.define_macro(
+                    'FITSIO_PYWRAP_ALWAYS_NONSTANDARD_STRINGS')
+
+        # fitsio requires libm as well.
+        self.compiler.add_library('m')
+
+        # call the original build_extensions
+
+        build_ext.build_extensions(self)
+
+    def patch_cfitsio(self):
+        patches = glob.glob('%s/*.patch' % self.cfitsio_patch_dir)
+        for patch in patches:
+            fname = os.path.basename(patch.replace('.patch', ''))
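+            # dry run first: a failing dry run means the patch no longer
+            # applies cleanly (most likely already applied), so skip it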
+            try:
+                subprocess.check_call(
+                    'patch -N --dry-run %s/%s %s' % (
+                        self.cfitsio_build_dir, fname, patch),
+                    shell=True)
+            except subprocess.CalledProcessError:
+                pass
+            else:
+                subprocess.check_call(
+                    'patch %s/%s %s' % (
+                        self.cfitsio_build_dir, fname, patch),
+                    shell=True)
+
+    def configure_cfitsio(self, CC=None, ARCHIVE=None, RANLIB=None):
+
+        # prepare source code and run configure
+        def copy_update(dir1, dir2):
+            f1 = os.listdir(dir1)
+            for f in f1:
+                path1 = os.path.join(dir1, f)
+                path2 = os.path.join(dir2, f)
+
+                if os.path.isdir(path1):
+                    if not os.path.exists(path2):
+                        os.makedirs(path2)
+                    copy_update(path1, path2)
+                else:
+                    if not os.path.exists(path2):
+                        shutil.copy(path1, path2)
+                    else:
+                        stat1 = os.stat(path1)
+                        stat2 = os.stat(path2)
+                        if (stat1.st_mtime > stat2.st_mtime):
+                            shutil.copy(path1, path2)
+
+        if not os.path.exists('build'):
+            os.makedirs('build')
+
+        if not os.path.exists(self.cfitsio_build_dir):
+            os.makedirs(self.cfitsio_build_dir)
+
+        if not os.path.exists(self.cfitsio_patch_dir):
+            os.makedirs(self.cfitsio_patch_dir)
+
+        copy_update(self.cfitsio_dir, self.cfitsio_build_dir)
+        copy_update('patches', self.cfitsio_patch_dir)
+
+        # we patch the source in the build dir to avoid mucking with the repo
+        self.patch_cfitsio()
+
+        makefile = os.path.join(self.cfitsio_build_dir, 'Makefile')
+
+        if os.path.exists(makefile):
+            # Makefile already there
+            return
+
+        args = ''
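+        # the first element of CC is the compiler itself; the remaining
+        # elements are flags, passed to configure as CFLAGS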
+        args += ' CC="%s"' % ' '.join(CC[:1])
+        args += ' CFLAGS="%s"' % ' '.join(CC[1:])
+
+        if ARCHIVE:
+            args += ' ARCHIVE="%s"' % ' '.join(ARCHIVE)
+        if RANLIB:
+            args += ' RANLIB="%s"' % ' '.join(RANLIB)
+
+        p = Popen(
+            "sh ./configure --with-bzip2 --enable-standard-strings " + args,
+            shell=True,
+            cwd=self.cfitsio_build_dir,
+        )
+        p.wait()
+        if p.returncode != 0:
+            raise ValueError(
+                "could not configure cfitsio %s" % self.cfitsio_version)
+
+    def compile_cfitsio(self):
+        p = Popen(
+            "make",
+            shell=True,
+            cwd=self.cfitsio_build_dir,
+        )
+        p.wait()
+        if p.returncode != 0:
+            raise ValueError(
+                "could not compile cfitsio %s" % self.cfitsio_version)
+
+    def check_system_cfitsio_objects(self, obj_name):
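+        # look for a static libcfitsio in each library dir and grep its
+        # symbol table (via nm) for the requested object name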
+        for lib_dir in self.library_dirs:
+            if os.path.isfile('%s/libcfitsio.a' % (lib_dir)):
+                p = Popen(
+                    "nm -g %s/libcfitsio.a | grep %s" % (lib_dir, obj_name),
+                    shell=True,
+                    stdout=PIPE,
+                    stderr=PIPE,
+                )
+                return len(p.stdout.read()) > 0
+
+        return False
+
+
+sources = ["fitsio/fitsio_pywrap.c"]
+
+ext = Extension("fitsio._fitsio_wrap", sources, include_dirs=['numpy'])
+
+description = ("A full featured python library to read from and "
+               "write to FITS files.")
+
+with open(os.path.join(os.path.dirname(__file__), "README.md")) as fp:
+    long_description = fp.read()
+
+classifiers = [
+    "Development Status :: 5 - Production/Stable",
+    "License :: OSI Approved :: GNU General Public License (GPL)",
+    "Topic :: Scientific/Engineering :: Astronomy",
+    "Intended Audience :: Science/Research",
+]
+
+setup(
+    name="fitsio",
+    version="1.1.4",
+    description=description,
+    long_description=long_description,
+    long_description_content_type='text/markdown; charset=UTF-8; variant=GFM',
+    license="GPL",
+    classifiers=classifiers,
+    url="https://github.com/esheldon/fitsio",
+    author="Erin Scott Sheldon",
+    author_email="erin.sheldon@gmail.com",
+    setup_requires=['numpy'],
+    install_requires=['numpy'],
+    packages=find_packages(),
+    include_package_data=True,
+    ext_modules=[ext],
+    cmdclass={"build_ext": build_ext_subclass}
+)