--- /dev/null
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
--- /dev/null
+include *.txt
+include README.md
+recursive-include cfitsio-4.4.1-20240617 *
+recursive-include patches *
+recursive-include fitsio/test_images *
+recursive-include zlib *
--- /dev/null
+Metadata-Version: 2.2
+Name: fitsio
+Version: 1.2.5
+Summary: A full featured python library to read from and write to FITS files.
+Home-page: https://github.com/esheldon/fitsio
+Author: Erin Scott Sheldon
+Author-email: erin.sheldon@gmail.com
+License: GPL
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Topic :: Scientific/Engineering :: Astronomy
+Classifier: Intended Audience :: Science/Research
+Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
+License-File: LICENSE.txt
+Requires-Dist: numpy
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: license
+Dynamic: requires-dist
+Dynamic: summary
+
+A python library to read from and write to FITS files.
+
+[](https://travis-ci.com/esheldon/fitsio)
+[](https://github.com/esheldon/fitsio/actions?query=workflow%3Atests)
+
+## Description
+
+This is a Python extension written in C and Python. Data are read into
+numpy arrays.
+
+A version of cfitsio is bundled with this package; there is no need to install
+your own, nor will the bundled version conflict with one you have installed.
+
+
+## Some Features
+
+- Read from and write to image, binary, and ASCII table extensions.
+- Read arbitrary subsets of table columns and rows without loading all the data
+ to memory.
+- Read image subsets without reading the whole image. Write subsets to existing images.
+- Write and read variable length table columns.
+- Read images and tables using slice notation similar to numpy arrays. This is like a more
+ powerful memmap, since it is column-aware for tables.
+- Append rows to an existing table. Delete row sets and row ranges. Resize tables,
+ or insert rows.
+- Query the columns and rows in a table.
+- Read and write header keywords.
+- Read and write images in tile-compressed format (RICE, GZIP, PLIO, HCOMPRESS).
+- Read/write gzip files directly. Read unix compress (.Z, .zip) and bzip2 (.bz2) files.
+- TDIM information is used to return array columns in the correct shape.
+- Write and read string table columns, including array columns of arbitrary
+ shape.
+- Read and write complex, bool (logical), unsigned integer, signed bytes types.
+- Write checksums into the header and verify them.
+- Insert new columns into tables in-place.
+- Iterate over rows in a table. Data are buffered for efficiency.
+- python 3 support, including python 3 strings
+
+
+## Examples
+
+```python
+import numpy as np
+
+import fitsio
+from fitsio import FITS, FITSHDR
+
+# Often you just want to quickly read or write data without bothering to
+# create a FITS object. In that case, you can use the read and write
+# convenience functions.
+
+# read all data from the first hdu that has data
+filename='data.fits'
+data = fitsio.read(filename)
+
+# read a subset of rows and columns from a table
+data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2)
+
+# read the header
+h = fitsio.read_header(filename)
+# read both data and header
+data,h = fitsio.read(filename, header=True)
+
+# open the file and write a new binary table extension with the data
+# array, which is a numpy array with fields, or "recarray".
+
+data = np.zeros(10, dtype=[('id','i8'),('ra','f8'),('dec','f8')])
+fitsio.write(filename, data)
+
+# Write an image to the same file. By default a new extension is
+# added to the file. use clobber=True to overwrite an existing file
+# instead. To append rows to an existing table, see below.
+
+fitsio.write(filename, image)
+
+#
+# the FITS class gives you the ability to explore the data, and gives
+# more control
+#
+
+# open a FITS file for reading and explore
+fits=fitsio.FITS('data.fits')
+
+# see what is in here; the FITS object prints itself
+print(fits)
+
+file: data.fits
+mode: READONLY
+extnum hdutype hduname
+0 IMAGE_HDU
+1 BINARY_TBL mytable
+
+# at the python or ipython prompt the fits object will
+# print itself
+>>> fits
+file: data.fits
+... etc
+
+# explore the extensions, either by extension number or
+# extension name if available
+>>> fits[0]
+
+file: data.fits
+extension: 0
+type: IMAGE_HDU
+image info:
+ data type: f8
+ dims: [4096,2048]
+
+# by name; can also use fits[1]
+>>> fits['mytable']
+
+file: data.fits
+extension: 1
+type: BINARY_TBL
+extname: mytable
+rows: 4328342
+column info:
+ i1scalar u1
+ f f4
+ fvec f4 array[2]
+ darr f8 array[3,2]
+ dvarr f8 varray[10]
+ s S5
+ svec S6 array[3]
+ svar S0 vstring[8]
+ sarr S2 array[4,3]
+
+# See bottom for how to get more information for an extension
+
+# [-1] refers to the last HDU
+>>> fits[-1]
+...
+
+# if there are multiple HDUs with the same name, and an EXTVER
+# is set, you can use it. Here extver=2
+# fits['mytable',2]
+
+
+# read the image from extension zero
+img = fits[0].read()
+img = fits[0][:,:]
+
+# read a subset of the image without reading the whole image
+img = fits[0][25:35, 45:55]
+
+
+# read all rows and columns from a binary table extension
+data = fits[1].read()
+data = fits['mytable'].read()
+data = fits[1][:]
+
+# read a subset of rows and columns. By default the column match is
+# case-insensitive; the result retains the names with their original case.
+# If columns is a sequence, a numpy array with fields (a recarray) is returned
+data = fits[1].read(rows=[1,5], columns=['index','x','y'])
+
+# Similar but using slice notation
+# row subsets
+data = fits[1][10:20]
+data = fits[1][10:20:2]
+data = fits[1][[1,5,18]]
+
+# Using EXTNAME and EXTVER values
+data = fits['SCI',2][10:20]
+
+# Slicing with reverse (flipped) striding
+data = fits[1][40:25]
+data = fits[1][40:25:-5]
+
+# all rows of column 'x'
+data = fits[1]['x'][:]
+
+# Read a few columns at once. This is more efficient than a separate read for
+# each column
+data = fits[1]['x','y'][:]
+
+# General column and row subsets.
+columns=['index','x','y']
+rows = [1, 5]
+data = fits[1][columns][rows]
+
+# data are returned in the order requested by the user
+# and duplicates are preserved
+rows = [2, 2, 5]
+data = fits[1][columns][rows]
+
+# iterate over rows in a table hdu
+# faster if we buffer some rows, let's buffer 1000 at a time
+fits=fitsio.FITS(filename,iter_row_buffer=1000)
+for row in fits[1]:
+ print(row)
+
+# iterate over HDUs in a FITS object
+for hdu in fits:
+ data=hdu.read()
+
+# Note dvarr shows type varray[10] and svar shows type vstring[8]. These
+# are variable length columns and the number specified is the maximum size.
+# By default they are read into fixed-length fields in the output array.
+# You can override this by constructing the FITS object with the vstorage
+# keyword or specifying vstorage when reading. Sending vstorage='object'
+# will store the data in variable size object fields to save memory; the
+# default is vstorage='fixed'. Object fields can also be written out to a
+# new FITS file as variable length to save disk space.
+
+fits = fitsio.FITS(filename,vstorage='object')
+# OR
+data = fits[1].read(vstorage='object')
+print(data['dvarr'].dtype)
+ dtype('object')
+
+
+# you can grab a FITS HDU object to simplify notation
+hdu1 = fits[1]
+data = hdu1['x','y'][35:50]
+
+# get rows that satisfy the input expression. See "Row Filtering
+# Specification" in the cfitsio manual (note no temporary table is
+# created in this case, contrary to the cfitsio docs)
+w=fits[1].where("x > 0.25 && y < 35.0")
+data = fits[1][w]
+
+# read the header
+h = fits[0].read_header()
+print(h['BITPIX'])
+ -64
+
+fits.close()
+
+
+# now write some data
+fits = FITS('test.fits','rw')
+
+
+# create a rec array. Note vstr
+# is a variable length string
+nrows=35
+data = np.zeros(nrows, dtype=[('index','i4'),('vstr','O'),('x','f8'),
+ ('arr','f4',(3,4))])
+data['index'] = np.arange(nrows,dtype='i4')
+data['x'] = np.random.random(nrows)
+data['vstr'] = [str(i) for i in range(nrows)]
+data['arr'] = np.arange(nrows*3*4,dtype='f4').reshape(nrows,3,4)
+
+# create a new table extension and write the data
+fits.write(data)
+
+# can also be a list of ordinary arrays if you send the names
+array_list=[xarray,yarray,namearray]
+names=['x','y','name']
+fits.write(array_list, names=names)
+
+# similarly a dict of arrays
+fits.write(dict_of_arrays)
+fits.write(dict_of_arrays, names=names) # control name order
+
+# append more rows to the table. The fields in data2 should match columns
+# in the table. Missing columns will be filled with zeros
+fits[-1].append(data2)
+
+# insert a new column into a table
+fits[-1].insert_column('newcol', data)
+
+# insert with a specific colnum
+fits[-1].insert_column('newcol', data, colnum=2)
+
+# overwrite rows
+fits[-1].write(data)
+
+# overwrite starting at a particular row. The table will grow if needed
+fits[-1].write(data, firstrow=350)
+
+
+# create an image
+img=np.arange(2*3,dtype='i4').reshape(2,3)
+
+# write an image in a new HDU (if this is a new file, the primary HDU)
+fits.write(img)
+
+# write an image with rice compression
+fits.write(img, compress='rice')
+
+# control the compression; quantization applies to floating-point images
+fimg=np.random.normal(size=2*3).reshape(2, 3)
+fits.write(fimg, compress='rice', qlevel=16, qmethod='SUBTRACTIVE_DITHER_2')
+
+# lossless gzip compression for integers or floating point
+fits.write(img, compress='gzip', qlevel=None)
+fits.write(fimg, compress='gzip', qlevel=None)
+
+# overwrite the image
+fits[ext].write(img2)
+
+# write into an existing image, starting at the location [300,400]
+# the image will be expanded if needed
+fits[ext].write(img3, start=[300,400])
+
+# change the shape of the image on disk
+fits[ext].reshape([250,100])
+
+# add checksums for the data
+fits[-1].write_checksum()
+
+# can later verify data integrity
+fits[-1].verify_checksum()
+
+# you can also write a header at the same time. The header can be
+# - a simple dict (no comments)
+# - a list of dicts with 'name','value','comment' fields
+# - a FITSHDR object
+
+hdict = {'somekey': 35, 'location': 'kitt peak'}
+fits.write(data, header=hdict)
+hlist = [{'name':'observer', 'value':'ES', 'comment':'who'},
+ {'name':'location','value':'CTIO'},
+ {'name':'photometric','value':True}]
+fits.write(data, header=hlist)
+hdr=FITSHDR(hlist)
+fits.write(data, header=hdr)
+
+# you can add individual keys to an existing HDU
+fits[1].write_key(name, value, comment="my comment")
+
+# Write multiple header keys to an existing HDU. Here records
+# is the same as sent with header= above
+fits[1].write_keys(records)
+
+# write special COMMENT fields
+fits[1].write_comment("observer JS")
+fits[1].write_comment("we had good weather")
+
+# write special HISTORY fields
+fits[1].write_history("processed with software X")
+fits[1].write_history("re-processed with software Y")
+
+fits.close()
+
+# using a context, the file is closed automatically after leaving the block
+with FITS('path/to/file') as fits:
+ data = fits[ext].read()
+
+    # you can check if an HDU exists using "in":
+ if 'blah' in fits:
+ data=fits['blah'].read()
+    if 2 in fits:
+ data=fits[2].read()
+
+# methods to get more information about an extension. For extension 1:
+fits[1].get_info()             # lots of info about the extension
+fits[1].has_data()             # returns True if data is present in extension
+fits[1].get_extname()
+fits[1].get_extver()
+fits[1].get_extnum()           # return zero-offset extension number
+fits[1].get_exttype()          # 'BINARY_TBL' or 'ASCII_TBL' or 'IMAGE_HDU'
+fits[1].get_offsets()          # byte offsets (header_start, data_start, data_end)
+fits[1].is_compressed()        # for images. True if tile-compressed
+fits[1].get_colnames()         # for tables
+fits[1].get_colname(colnum)    # for tables, find the name from column number
+fits[1].get_nrows()            # for tables
+fits[1].get_rec_dtype()        # for tables
+fits[1].get_rec_column_descr() # for tables
+fits[1].get_vstorage()         # for tables, storage mechanism for variable
+                               # length columns
+
+# public attributes you can feel free to change as needed
+fits[1].lower          # if True, lower case colnames on output
+fits[1].upper          # if True, upper case colnames on output
+fits[1].case_sensitive # if True, names are matched case-sensitively
+```
+
+
+## Installation
+
+The easiest way is using pip or conda. To get the latest release
+
+ pip install fitsio
+
+ # update fitsio (and everything else)
+ pip install fitsio --upgrade
+
+ # if pip refuses to update to a newer version
+ pip install fitsio --upgrade --ignore-installed
+
+ # if you only want to upgrade fitsio
+ pip install fitsio --no-deps --upgrade --ignore-installed
+
+ # for conda, use conda-forge
+ conda install -c conda-forge fitsio
+
+You can also get the latest source tarball release from
+
+ https://pypi.python.org/pypi/fitsio
+
+or the bleeding edge source from github or use git. To check out
+the code for the first time
+
+ git clone https://github.com/esheldon/fitsio.git
+
+Or at a later time, update to the latest with
+
+    cd fitsio
+    git pull
+
+If you downloaded the tarball, use tar xvfz to unpack it, enter the fitsio directory and type
+
+ python setup.py install
+
+optionally with a prefix
+
+ python setup.py install --prefix=/some/path
+
+## Requirements
+
+- python 2 or python 3
+- a C compiler and build tools like `make`, `patch`, etc.
+- numpy (See the note below. Generally, numpy 1.11 or later is better.)
+
+
+### Do not use numpy 1.10.0 or 1.10.1
+
+There is a serious performance regression in numpy 1.10 that results
+in fitsio running tens to hundreds of times slower. A fix may be
+forthcoming in a later release. Please comment on
+https://github.com/numpy/numpy/issues/6467 if this has impacted your work.
+
+
+## Tests
+
+All of the unit tests should pass for a fully functional installation.
+
+```bash
+pytest fitsio
+```
+
+Some tests may fail if certain libraries, such as bzip2, are not available.
+Such a failure only means that bzipped files cannot be read; other
+functionality is unaffected.
+
+## Notes on Usage and Features
+
+### cfitsio bundling
+
+We bundle cfitsio partly because many deployed versions of cfitsio in the
+wild do not have support for interesting features like tiled image compression.
+Bundling a version that meets our needs is a safe alternative.
+
+### array ordering
+
+Since numpy uses C (row-major) order while FITS uses Fortran (column-major)
+order, we write the TDIM and image dimensions in reverse order, but write the
+data as is. Likewise, we reverse the dims read from the header when creating
+the numpy dtype, but read the data as is.
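+
+For example, here is a minimal sketch of what this ordering rule means in
+practice (the file name is hypothetical):
+
+```python
+import numpy as np
+import fitsio
+
+# a (3, 4) array in numpy's C (row-major) order
+img = np.arange(12, dtype='f4').reshape(3, 4)
+
+# the header records the dimensions reversed (NAXIS1=4, NAXIS2=3),
+# but the data are written as is
+fitsio.write('ordering.fits', img, clobber=True)
+
+# reading reverses the header dims again, so the original shape comes back
+back = fitsio.read('ordering.fits')
+assert back.shape == (3, 4)
+```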
+
+### `distutils` vs `setuptools`
+
+As of version `1.0.0`, `fitsio` has transitioned to `setuptools` for packaging
+and installation. There are many reasons to do this (and not to do this). However,
+at a practical level, what this means for you is that you may have trouble uninstalling
+older versions with `pip` via `pip uninstall fitsio`. If you do, the best thing is
+to remove the old files by hand. See this [stackoverflow question](https://stackoverflow.com/questions/402359/how-do-you-uninstall-a-python-package-that-was-installed-using-distutils)
+for example.
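+
+If you end up in that situation, here is a small sketch for locating the
+leftover files; it only prints candidate paths, so inspect them before
+removing anything by hand:
+
+```python
+# list candidate fitsio files in site-packages for manual inspection;
+# nothing here deletes anything
+import glob
+import site
+
+for sitedir in site.getsitepackages():
+    for path in glob.glob(sitedir + '/fitsio*'):
+        print(path)
+```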
+
+### python 3 strings
+
+As of version `1.0.0`, fitsio supports Python 3 strings natively. This means
+that under Python 3, native strings are read from and written correctly to
+FITS files, and all byte string columns are treated as ASCII-encoded unicode
+strings as well. For FITS files written with a previous version of fitsio, the
+data in Python 3 will now come back as a string and not a byte string. Note
+that this is not full unicode support: internally, fitsio only supports the
+ASCII character set.
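+
+A minimal sketch of the behavior described above (the file name is
+hypothetical, and we assume a native unicode dtype is accepted for writing,
+per the native-string support just described):
+
+```python
+import numpy as np
+import fitsio
+
+# write a fixed-width string column using a native (unicode) dtype
+data = np.zeros(2, dtype=[('name', 'U5')])
+data['name'] = ['alpha', 'gamma']
+fitsio.write('strings.fits', data, clobber=True)
+
+# under Python 3 the values come back as native str, not bytes
+back = fitsio.read('strings.fits')
+assert isinstance(back['name'][0], str)
+```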
+
+## TODO
+
+- HDU groups: does anyone use these? If so open an issue!
--- /dev/null
+A python library to read from and write to FITS files.
+
+[](https://travis-ci.com/esheldon/fitsio)
+[](https://github.com/esheldon/fitsio/actions?query=workflow%3Atests)
+
+## Description
+
+This is a python extension written in c and python. Data are read into
+numerical python arrays.
+
+A version of cfitsio is bundled with this package, there is no need to install
+your own, nor will this conflict with a version you have installed.
+
+
+## Some Features
+
+- Read from and write to image, binary, and ascii table extensions.
+- Read arbitrary subsets of table columns and rows without loading all the data
+ to memory.
+- Read image subsets without reading the whole image. Write subsets to existing images.
+- Write and read variable length table columns.
+- Read images and tables using slice notation similar to numpy arrays. This is like a more
+ powerful memmap, since it is column-aware for tables.
+- Append rows to an existing table. Delete row sets and row ranges. Resize tables,
+ or insert rows.
+- Query the columns and rows in a table.
+- Read and write header keywords.
+- Read and write images in tile-compressed format (RICE,GZIP,PLIO,HCOMPRESS).
+- Read/write gzip files directly. Read unix compress (.Z,.zip) and bzip2 (.bz2) files.
+- TDIM information is used to return array columns in the correct shape.
+- Write and read string table columns, including array columns of arbitrary
+ shape.
+- Read and write complex, bool (logical), unsigned integer, signed bytes types.
+- Write checksums into the header and verify them.
+- Insert new columns into tables in-place.
+- Iterate over rows in a table. Data are buffered for efficiency.
+- python 3 support, including python 3 strings
+
+
+## Examples
+
+```python
+import fitsio
+from fitsio import FITS,FITSHDR
+
+# Often you just want to quickly read or write data without bothering to
+# create a FITS object. In that case, you can use the read and write
+# convienience functions.
+
+# read all data from the first hdu that has data
+filename='data.fits'
+data = fitsio.read(filename)
+
+# read a subset of rows and columns from a table
+data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2)
+
+# read the header
+h = fitsio.read_header(filename)
+# read both data and header
+data,h = fitsio.read(filename, header=True)
+
+# open the file and write a new binary table extension with the data
+# array, which is a numpy array with fields, or "recarray".
+
+data = np.zeros(10, dtype=[('id','i8'),('ra','f8'),('dec','f8')])
+fitsio.write(filename, data)
+
+# Write an image to the same file. By default a new extension is
+# added to the file. use clobber=True to overwrite an existing file
+# instead. To append rows to an existing table, see below.
+
+fitsio.write(filename, image)
+
+#
+# the FITS class gives the you the ability to explore the data, and gives
+# more control
+#
+
+# open a FITS file for reading and explore
+fits=fitsio.FITS('data.fits')
+
+# see what is in here; the FITS object prints itself
+print(fits)
+
+file: data.fits
+mode: READONLY
+extnum hdutype hduname
+0 IMAGE_HDU
+1 BINARY_TBL mytable
+
+# at the python or ipython prompt the fits object will
+# print itself
+>>> fits
+file: data.fits
+... etc
+
+# explore the extensions, either by extension number or
+# extension name if available
+>>> fits[0]
+
+file: data.fits
+extension: 0
+type: IMAGE_HDU
+image info:
+ data type: f8
+ dims: [4096,2048]
+
+# by name; can also use fits[1]
+>>> fits['mytable']
+
+file: data.fits
+extension: 1
+type: BINARY_TBL
+extname: mytable
+rows: 4328342
+column info:
+ i1scalar u1
+ f f4
+ fvec f4 array[2]
+ darr f8 array[3,2]
+ dvarr f8 varray[10]
+ s S5
+ svec S6 array[3]
+ svar S0 vstring[8]
+ sarr S2 array[4,3]
+
+# See bottom for how to get more information for an extension
+
+# [-1] to refers the last HDU
+>>> fits[-1]
+...
+
+# if there are multiple HDUs with the same name, and an EXTVER
+# is set, you can use it. Here extver=2
+# fits['mytable',2]
+
+
+# read the image from extension zero
+img = fits[0].read()
+img = fits[0][:,:]
+
+# read a subset of the image without reading the whole image
+img = fits[0][25:35, 45:55]
+
+
+# read all rows and columns from a binary table extension
+data = fits[1].read()
+data = fits['mytable'].read()
+data = fits[1][:]
+
+# read a subset of rows and columns. By default uses a case-insensitive
+# match. The result retains the names with original case. If columns is a
+# sequence, a numpy array with fields, or recarray is returned
+data = fits[1].read(rows=[1,5], columns=['index','x','y'])
+
+# Similar but using slice notation
+# row subsets
+data = fits[1][10:20]
+data = fits[1][10:20:2]
+data = fits[1][[1,5,18]]
+
+# Using EXTNAME and EXTVER values
+data = fits['SCI',2][10:20]
+
+# Slicing with reverse (flipped) striding
+data = fits[1][40:25]
+data = fits[1][40:25:-5]
+
+# all rows of column 'x'
+data = fits[1]['x'][:]
+
+# Read a few columns at once. This is more efficient than separate read for
+# each column
+data = fits[1]['x','y'][:]
+
+# General column and row subsets.
+columns=['index','x','y']
+rows = [1, 5]
+data = fits[1][columns][rows]
+
+# data are returned in the order requested by the user
+# and duplicates are preserved
+rows = [2, 2, 5]
+data = fits[1][columns][rows]
+
+# iterate over rows in a table hdu
+# faster if we buffer some rows, let's buffer 1000 at a time
+fits=fitsio.FITS(filename,iter_row_buffer=1000)
+for row in fits[1]:
+ print(row)
+
+# iterate over HDUs in a FITS object
+for hdu in fits:
+ data=hdu.read()
+
+# Note dvarr shows type varray[10] and svar shows type vstring[8]. These
+# are variable length columns and the number specified is the maximum size.
+# By default they are read into fixed-length fields in the output array.
+# You can over-ride this by constructing the FITS object with the vstorage
+# keyword or specifying vstorage when reading. Sending vstorage='object'
+# will store the data in variable size object fields to save memory; the
+# default is vstorage='fixed'. Object fields can also be written out to a
+# new FITS file as variable length to save disk space.
+
+fits = fitsio.FITS(filename,vstorage='object')
+# OR
+data = fits[1].read(vstorage='object')
+print(data['dvarr'].dtype)
+ dtype('object')
+
+
+# you can grab a FITS HDU object to simplify notation
+hdu1 = fits[1]
+data = hdu1['x','y'][35:50]
+
+# get rows that satisfy the input expression. See "Row Filtering
+# Specification" in the cfitsio manual (note no temporary table is
+# created in this case, contrary to the cfitsio docs)
+w=fits[1].where("x > 0.25 && y < 35.0")
+data = fits[1][w]
+
+# read the header
+h = fits[0].read_header()
+print(h['BITPIX'])
+ -64
+
+fits.close()
+
+
+# now write some data
+fits = FITS('test.fits','rw')
+
+
+# create a rec array. Note vstr
+# is a variable length string
+nrows=35
+data = np.zeros(nrows, dtype=[('index','i4'),('vstr','O'),('x','f8'),
+ ('arr','f4',(3,4))])
+data['index'] = np.arange(nrows,dtype='i4')
+data['x'] = np.random.random(nrows)
+data['vstr'] = [str(i) for i in xrange(nrows)]
+data['arr'] = np.arange(nrows*3*4,dtype='f4').reshape(nrows,3,4)
+
+# create a new table extension and write the data
+fits.write(data)
+
+# can also be a list of ordinary arrays if you send the names
+array_list=[xarray,yarray,namearray]
+names=['x','y','name']
+fits.write(array_list, names=names)
+
+# similarly a dict of arrays
+fits.write(dict_of_arrays)
+fits.write(dict_of_arrays, names=names) # control name order
+
+# append more rows to the table. The fields in data2 should match columns
+# in the table. missing columns will be filled with zeros
+fits[-1].append(data2)
+
+# insert a new column into a table
+fits[-1].insert_column('newcol', data)
+
+# insert with a specific colnum
+fits[-1].insert_column('newcol', data, colnum=2)
+
+# overwrite rows
+fits[-1].write(data)
+
+# overwrite starting at a particular row. The table will grow if needed
+fits[-1].write(data, firstrow=350)
+
+
+# create an image
+img=np.arange(2*3,dtype='i4').reshape(2,3)
+
+# write an image in a new HDU (if this is a new file, the primary HDU)
+fits.write(img)
+
+# write an image with rice compression
+fits.write(img, compress='rice')
+
+# control the compression
+fimg=np.random.normal(size=2*3).reshape(2, 3)
+fits.write(img, compress='rice', qlevel=16, qmethod='SUBTRACTIVE_DITHER_2')
+
+# lossless gzip compression for integers or floating point
+fits.write(img, compress='gzip', qlevel=None)
+fits.write(fimg, compress='gzip', qlevel=None)
+
+# overwrite the image
+fits[ext].write(img2)
+
+# write into an existing image, starting at the location [300,400]
+# the image will be expanded if needed
+fits[ext].write(img3, start=[300,400])
+
+# change the shape of the image on disk
+fits[ext].reshape([250,100])
+
+# add checksums for the data
+fits[-1].write_checksum()
+
+# can later verify data integridy
+fits[-1].verify_checksum()
+
+# you can also write a header at the same time. The header can be
+# - a simple dict (no comments)
+# - a list of dicts with 'name','value','comment' fields
+# - a FITSHDR object
+
+hdict = {'somekey': 35, 'location': 'kitt peak'}
+fits.write(data, header=hdict)
+hlist = [{'name':'observer', 'value':'ES', 'comment':'who'},
+ {'name':'location','value':'CTIO'},
+ {'name':'photometric','value':True}]
+fits.write(data, header=hlist)
+hdr=FITSHDR(hlist)
+fits.write(data, header=hdr)
+
+# you can add individual keys to an existing HDU
+fits[1].write_key(name, value, comment="my comment")
+
+# Write multiple header keys to an existing HDU. Here records
+# is the same as sent with header= above
+fits[1].write_keys(records)
+
+# write special COMMENT fields
+fits[1].write_comment("observer JS")
+fits[1].write_comment("we had good weather")
+
+# write special history fields
+fits[1].write_history("processed with software X")
+fits[1].write_history("re-processed with software Y")
+
+fits.close()
+
+# using a context, the file is closed automatically after leaving the block
+with FITS('path/to/file') as fits:
+ data = fits[ext].read()
+
+ # you can check if a header exists using "in":
+ if 'blah' in fits:
+ data=fits['blah'].read()
+ if 2 in f:
+ data=fits[2].read()
+
+# methods to get more information about extension. For extension 1:
+f[1].get_info() # lots of info about the extension
+f[1].has_data() # returns True if data is present in extension
+f[1].get_extname()
+f[1].get_extver()
+f[1].get_extnum() # return zero-offset extension number
+f[1].get_exttype() # 'BINARY_TBL' or 'ASCII_TBL' or 'IMAGE_HDU'
+f[1].get_offsets() # byte offsets (header_start, data_start, data_end)
+f[1].is_compressed() # for images. True if tile-compressed
+f[1].get_colnames() # for tables
+f[1].get_colname(colnum) # for tables find the name from column number
+f[1].get_nrows() # for tables
+f[1].get_rec_dtype() # for tables
+f[1].get_rec_column_descr() # for tables
+f[1].get_vstorage() # for tables, storage mechanism for variable
+ # length columns
+
+# public attributes you can feel free to change as needed
+f[1].lower # If True, lower case colnames on output
+f[1].upper # If True, upper case colnames on output
+f[1].case_sensitive # if True, names are matched case sensitive
+```
+
+
+## Installation
+
+The easiest way is using pip or conda. To get the latest release
+
+ pip install fitsio
+
+ # update fitsio (and everything else)
+ pip install fitsio --upgrade
+
+ # if pip refuses to update to a newer version
+ pip install fitsio --upgrade --ignore-installed
+
+ # if you only want to upgrade fitsio
+ pip install fitsio --no-deps --upgrade --ignore-installed
+
+ # for conda, use conda-forge
+ conda install -c conda-forge fitsio
+
+You can also get the latest source tarball release from
+
+ https://pypi.python.org/pypi/fitsio
+
+or the bleeding edge source from github or use git. To check out
+the code for the first time
+
+ git clone https://github.com/esheldon/fitsio.git
+
+Or at a later time to update to the latest
+
+ cd fitsio
+ git update
+
+Use tar xvfz to untar the file, enter the fitsio directory and type
+
+ python setup.py install
+
+optionally with a prefix
+
+ python setup.py install --prefix=/some/path
+
+## Requirements
+
+- python 2 or python 3
+- a C compiler and build tools like `make`, `patch`, etc.
+- numpy (See the note below. Generally, numpy 1.11 or later is better.)
+
+
+### Do not use numpy 1.10.0 or 1.10.1
+
+There is a serious performance regression in numpy 1.10 that results
+in fitsio running tens to hundreds of times slower. A fix may be
+forthcoming in a later release. Please comment here if this
+has already impacted your work https://github.com/numpy/numpy/issues/6467
+
+
+## Tests
+
+The unit tests should all pass for full support.
+
+```bash
+pytest fitsio
+```
+
+Some tests may fail if certain libraries are not available, such
+as bzip2. This failure only implies that bzipped files cannot
+be read, without affecting other functionality.
+
+## Notes on Usage and Features
+
+### cfitsio bundling
+
+We bundle cfitsio partly because many deployed versions of cfitsio in the
+wild do not have support for interesting features like tiled image compression.
+Bundling a version that meets our needs is a safe alternative.
+
+### array ordering
+
+Since numpy uses C order, FITS uses fortran order, we have to write the TDIM
+and image dimensions in reverse order, but write the data as is. Then we need
+to also reverse the dims as read from the header when creating the numpy dtype,
+but read as is.
+
+### `distutils` vs `setuptools`
+
+As of version `1.0.0`, `fitsio` has been transitioned to `setuptools` for packaging
+and installation. There are many reasons to do this (and to not do this). However,
+at a practical level, what this means for you is that you may have trouble uninstalling
+older versions with `pip` via `pip uninstall fitsio`. If you do, the best thing to do is
+to manually remove the files manually. See this [stackoverflow question](https://stackoverflow.com/questions/402359/how-do-you-uninstall-a-python-package-that-was-installed-using-distutils)
+for example.
+
+### python 3 strings
+
+As of version `1.0.0`, fitsio now supports Python 3 strings natively. This support
+means that for Python 3, native strings are read from and written correctly to
+FITS files. All byte string columns are treated as ASCII-encoded unicode strings
+as well. For FITS files written with a previous version of fitsio, the data
+in Python 3 will now come back as a string and not a byte string. Note that this
+support is not the same as full unicode support. Internally, fitsio only supports
+the ASCII character set.
+
+## TODO
+
+- HDU groups: does anyone use these? If so open an issue!
--- /dev/null
+Metadata-Version: 2.2
+Name: fitsio
+Version: 1.2.5
+Summary: A full featured python library to read from and write to FITS files.
+Home-page: https://github.com/esheldon/fitsio
+Author: Erin Scott Sheldon
+Author-email: erin.sheldon@gmail.com
+License: GPL
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Topic :: Scientific/Engineering :: Astronomy
+Classifier: Intended Audience :: Science/Research
+Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
+License-File: LICENSE.txt
+Requires-Dist: numpy
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: license
+Dynamic: requires-dist
+Dynamic: summary
+
+A python library to read from and write to FITS files.
+
+[](https://travis-ci.com/esheldon/fitsio)
+[](https://github.com/esheldon/fitsio/actions?query=workflow%3Atests)
+
+## Description
+
+This is a python extension written in c and python. Data are read into
+numerical python arrays.
+
+A version of cfitsio is bundled with this package, there is no need to install
+your own, nor will this conflict with a version you have installed.
+
+
+## Some Features
+
+- Read from and write to image, binary, and ascii table extensions.
+- Read arbitrary subsets of table columns and rows without loading all the data
+ to memory.
+- Read image subsets without reading the whole image. Write subsets to existing images.
+- Write and read variable length table columns.
+- Read images and tables using slice notation similar to numpy arrays. This is like a more
+ powerful memmap, since it is column-aware for tables.
+- Append rows to an existing table. Delete row sets and row ranges. Resize tables,
+ or insert rows.
+- Query the columns and rows in a table.
+- Read and write header keywords.
+- Read and write images in tile-compressed format (RICE,GZIP,PLIO,HCOMPRESS).
+- Read/write gzip files directly. Read unix compress (.Z,.zip) and bzip2 (.bz2) files.
+- TDIM information is used to return array columns in the correct shape.
+- Write and read string table columns, including array columns of arbitrary
+ shape.
+- Read and write complex, bool (logical), unsigned integer, signed bytes types.
+- Write checksums into the header and verify them.
+- Insert new columns into tables in-place.
+- Iterate over rows in a table. Data are buffered for efficiency.
+- python 3 support, including python 3 strings
+
+
+## Examples
+
+```python
+import fitsio
+from fitsio import FITS,FITSHDR
+
+# Often you just want to quickly read or write data without bothering to
+# create a FITS object. In that case, you can use the read and write
+# convienience functions.
+
+# read all data from the first hdu that has data
+filename='data.fits'
+data = fitsio.read(filename)
+
+# read a subset of rows and columns from a table
+data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2)
+
+# read the header
+h = fitsio.read_header(filename)
+# read both data and header
+data,h = fitsio.read(filename, header=True)
+
+# open the file and write a new binary table extension with the data
+# array, which is a numpy array with fields, or "recarray".
+
+data = np.zeros(10, dtype=[('id','i8'),('ra','f8'),('dec','f8')])
+fitsio.write(filename, data)
+
+# Write an image to the same file. By default a new extension is
+# added to the file. use clobber=True to overwrite an existing file
+# instead. To append rows to an existing table, see below.
+
+fitsio.write(filename, image)
+
+#
+# the FITS class gives the you the ability to explore the data, and gives
+# more control
+#
+
+# open a FITS file for reading and explore
+fits=fitsio.FITS('data.fits')
+
+# see what is in here; the FITS object prints itself
+print(fits)
+
+file: data.fits
+mode: READONLY
+extnum hdutype hduname
+0 IMAGE_HDU
+1 BINARY_TBL mytable
+
+# at the python or ipython prompt the fits object will
+# print itself
+>>> fits
+file: data.fits
+... etc
+
+# explore the extensions, either by extension number or
+# extension name if available
+>>> fits[0]
+
+file: data.fits
+extension: 0
+type: IMAGE_HDU
+image info:
+ data type: f8
+ dims: [4096,2048]
+
+# by name; can also use fits[1]
+>>> fits['mytable']
+
+file: data.fits
+extension: 1
+type: BINARY_TBL
+extname: mytable
+rows: 4328342
+column info:
+ i1scalar u1
+ f f4
+ fvec f4 array[2]
+ darr f8 array[3,2]
+ dvarr f8 varray[10]
+ s S5
+ svec S6 array[3]
+ svar S0 vstring[8]
+ sarr S2 array[4,3]
+
+# See bottom for how to get more information for an extension
+
+# [-1] to refers the last HDU
+>>> fits[-1]
+...
+
+# if there are multiple HDUs with the same name, and an EXTVER
+# is set, you can use it. Here extver=2
+# fits['mytable',2]
+
+
+# read the image from extension zero
+img = fits[0].read()
+img = fits[0][:,:]
+
+# read a subset of the image without reading the whole image
+img = fits[0][25:35, 45:55]
+
+
+# read all rows and columns from a binary table extension
+data = fits[1].read()
+data = fits['mytable'].read()
+data = fits[1][:]
+
+# read a subset of rows and columns. By default uses a case-insensitive
+# match. The result retains the names with original case. If columns is a
+# sequence, a numpy array with fields, or recarray is returned
+data = fits[1].read(rows=[1,5], columns=['index','x','y'])
+
+# Similar but using slice notation
+# row subsets
+data = fits[1][10:20]
+data = fits[1][10:20:2]
+data = fits[1][[1,5,18]]
+
+# Using EXTNAME and EXTVER values
+data = fits['SCI',2][10:20]
+
+# Slicing with reverse (flipped) striding
+data = fits[1][40:25]
+data = fits[1][40:25:-5]
+
+# all rows of column 'x'
+data = fits[1]['x'][:]
+
+# Read a few columns at once. This is more efficient than separate read for
+# each column
+data = fits[1]['x','y'][:]
+
+# General column and row subsets.
+columns=['index','x','y']
+rows = [1, 5]
+data = fits[1][columns][rows]
+
+# data are returned in the order requested by the user
+# and duplicates are preserved
+rows = [2, 2, 5]
+data = fits[1][columns][rows]
+
+# iterate over rows in a table hdu
+# faster if we buffer some rows, let's buffer 1000 at a time
+fits=fitsio.FITS(filename,iter_row_buffer=1000)
+for row in fits[1]:
+ print(row)
+
+# iterate over HDUs in a FITS object
+for hdu in fits:
+ data=hdu.read()
+
+# Note dvarr shows type varray[10] and svar shows type vstring[8]. These
+# are variable length columns and the number specified is the maximum size.
+# By default they are read into fixed-length fields in the output array.
+# You can over-ride this by constructing the FITS object with the vstorage
+# keyword or specifying vstorage when reading. Sending vstorage='object'
+# will store the data in variable size object fields to save memory; the
+# default is vstorage='fixed'. Object fields can also be written out to a
+# new FITS file as variable length to save disk space.
+
+fits = fitsio.FITS(filename,vstorage='object')
+# OR
+data = fits[1].read(vstorage='object')
+print(data['dvarr'].dtype)
+ dtype('object')
+
+
+# you can grab a FITS HDU object to simplify notation
+hdu1 = fits[1]
+data = hdu1['x','y'][35:50]
+
+# get rows that satisfy the input expression. See "Row Filtering
+# Specification" in the cfitsio manual (note no temporary table is
+# created in this case, contrary to the cfitsio docs)
+w=fits[1].where("x > 0.25 && y < 35.0")
+data = fits[1][w]
+
+# read the header
+h = fits[0].read_header()
+print(h['BITPIX'])
+ -64
+
+fits.close()
+
+
+# now write some data
+fits = FITS('test.fits','rw')
+
+
+# create a rec array. Note vstr
+# is a variable length string
+nrows=35
+data = np.zeros(nrows, dtype=[('index','i4'),('vstr','O'),('x','f8'),
+ ('arr','f4',(3,4))])
+data['index'] = np.arange(nrows,dtype='i4')
+data['x'] = np.random.random(nrows)
+data['vstr'] = [str(i) for i in xrange(nrows)]
+data['arr'] = np.arange(nrows*3*4,dtype='f4').reshape(nrows,3,4)
+
+# create a new table extension and write the data
+fits.write(data)
+
+# can also be a list of ordinary arrays if you send the names
+array_list=[xarray,yarray,namearray]
+names=['x','y','name']
+fits.write(array_list, names=names)
+
+# similarly a dict of arrays
+fits.write(dict_of_arrays)
+fits.write(dict_of_arrays, names=names) # control name order
+
+# append more rows to the table. The fields in data2 should match columns
+# in the table. Missing columns will be filled with zeros
+fits[-1].append(data2)
+
+# insert a new column into a table
+fits[-1].insert_column('newcol', data)
+
+# insert with a specific colnum
+fits[-1].insert_column('newcol', data, colnum=2)
+
+# overwrite rows
+fits[-1].write(data)
+
+# overwrite starting at a particular row. The table will grow if needed
+fits[-1].write(data, firstrow=350)
+
+
+# create an image
+img=np.arange(2*3,dtype='i4').reshape(2,3)
+
+# write an image in a new HDU (if this is a new file, the primary HDU)
+fits.write(img)
+
+# write an image with rice compression
+fits.write(img, compress='rice')
+
+# control the compression; quantize a floating point image
+fimg=np.random.normal(size=2*3).reshape(2, 3)
+fits.write(fimg, compress='rice', qlevel=16, qmethod='SUBTRACTIVE_DITHER_2')
+
+# lossless gzip compression for integers or floating point
+fits.write(img, compress='gzip', qlevel=None)
+fits.write(fimg, compress='gzip', qlevel=None)
+
+# overwrite the image
+fits[ext].write(img2)
+
+# write into an existing image, starting at the location [300,400]
+# the image will be expanded if needed
+fits[ext].write(img3, start=[300,400])
+
+# change the shape of the image on disk
+fits[ext].reshape([250,100])
+
+# add checksums for the data
+fits[-1].write_checksum()
+
+# can later verify data integrity
+fits[-1].verify_checksum()
+
+# you can also write a header at the same time. The header can be
+# - a simple dict (no comments)
+# - a list of dicts with 'name','value','comment' fields
+# - a FITSHDR object
+
+hdict = {'somekey': 35, 'location': 'kitt peak'}
+fits.write(data, header=hdict)
+hlist = [{'name':'observer', 'value':'ES', 'comment':'who'},
+ {'name':'location','value':'CTIO'},
+ {'name':'photometric','value':True}]
+fits.write(data, header=hlist)
+hdr=FITSHDR(hlist)
+fits.write(data, header=hdr)
+
+# you can add individual keys to an existing HDU
+fits[1].write_key(name, value, comment="my comment")
+
+# Write multiple header keys to an existing HDU. Here records
+# is the same as sent with header= above
+fits[1].write_keys(records)
+
+# write special COMMENT fields
+fits[1].write_comment("observer JS")
+fits[1].write_comment("we had good weather")
+
+# write special HISTORY fields
+fits[1].write_history("processed with software X")
+fits[1].write_history("re-processed with software Y")
+
+fits.close()
+
+# using a context, the file is closed automatically after leaving the block
+with FITS('path/to/file') as fits:
+ data = fits[ext].read()
+
+ # you can check if an extension exists using "in":
+ if 'blah' in fits:
+ data=fits['blah'].read()
+ if 2 in fits:
+ data=fits[2].read()
+
+# methods to get more information about an extension. For extension 1:
+fits[1].get_info() # lots of info about the extension
+fits[1].has_data() # returns True if data is present in the extension
+fits[1].get_extname()
+fits[1].get_extver()
+fits[1].get_extnum() # returns the zero-offset extension number
+fits[1].get_exttype() # 'BINARY_TBL' or 'ASCII_TBL' or 'IMAGE_HDU'
+fits[1].get_offsets() # byte offsets (header_start, data_start, data_end)
+fits[1].is_compressed() # for images; True if tile-compressed
+fits[1].get_colnames() # for tables
+fits[1].get_colname(colnum) # for tables, find the name from the column number
+fits[1].get_nrows() # for tables
+fits[1].get_rec_dtype() # for tables
+fits[1].get_rec_column_descr() # for tables
+fits[1].get_vstorage() # for tables, storage mechanism for variable
+ # length columns
+
+# public attributes you can change as needed
+fits[1].lower # if True, lower case colnames on output
+fits[1].upper # if True, upper case colnames on output
+fits[1].case_sensitive # if True, names are matched case sensitively
+```
+
+
+## Installation
+
+The easiest way is with pip or conda. To get the latest release
+
+ pip install fitsio
+
+ # update fitsio (and everything else)
+ pip install fitsio --upgrade
+
+ # if pip refuses to update to a newer version
+ pip install fitsio --upgrade --ignore-installed
+
+ # if you only want to upgrade fitsio
+ pip install fitsio --no-deps --upgrade --ignore-installed
+
+ # for conda, use conda-forge
+ conda install -c conda-forge fitsio
+
+You can also get the latest source tarball release from
+
+ https://pypi.python.org/pypi/fitsio
+
+or the bleeding-edge source from github using git. To check out
+the code for the first time
+
+ git clone https://github.com/esheldon/fitsio.git
+
+Or at a later time to update to the latest
+
+ cd fitsio
+ git pull
+
+For the source tarball, use tar xvfz to unpack it, enter the fitsio
+directory and type
+
+ python setup.py install
+
+optionally with a prefix
+
+ python setup.py install --prefix=/some/path
+
+## Requirements
+
+- python 2 or python 3
+- a C compiler and build tools like `make`, `patch`, etc.
+- numpy (See the note below. Generally, numpy 1.11 or later is better.)
+
+
+### Do not use numpy 1.10.0 or 1.10.1
+
+There is a serious performance regression in numpy 1.10 that results
+in fitsio running tens to hundreds of times slower. A fix may be
+forthcoming in a later release. Please comment at
+https://github.com/numpy/numpy/issues/6467 if this has already
+impacted your work.
+
+
+## Tests
+
+If your installation is fully supported, the unit tests should all pass.
+
+```bash
+pytest fitsio
+```
+
+Some tests may fail if certain libraries, such as bzip2, are not
+available. Such failures only mean that bzipped files cannot be read;
+other functionality is unaffected.
+
+## Notes on Usage and Features
+
+### cfitsio bundling
+
+We bundle cfitsio partly because many deployed versions of cfitsio in the
+wild do not have support for interesting features like tiled image compression.
+Bundling a version that meets our needs is a safe alternative.
+
+### array ordering
+
+Since numpy uses C order while FITS uses Fortran order, we have to write the
+TDIM and image dimensions in reverse order, but write the data as-is. Likewise,
+we reverse the dims as read from the header when creating the numpy dtype,
+but read the data as-is.
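+
+Here is a minimal sketch of that convention, using numpy only; the reversal
+shown is what fitsio performs internally:
+
+```python
+import numpy as np
+
+# a numpy image in C order with shape (2, 3) ...
+img = np.arange(6, dtype='i4').reshape(2, 3)
+
+# ... is described in the FITS header in Fortran order: NAXIS1=3, NAXIS2=2
+fits_dims = list(reversed(img.shape))  # [3, 2]
+
+# on read, reverse the header dims to recover the numpy shape
+numpy_shape = tuple(reversed(fits_dims))  # (2, 3)
+```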
+
+### `distutils` vs `setuptools`
+
+As of version `1.0.0`, `fitsio` has been transitioned to `setuptools` for packaging
+and installation. There are many reasons to do this (and to not do this). However,
+at a practical level, what this means for you is that you may have trouble uninstalling
+older versions with `pip` via `pip uninstall fitsio`. If you do, the best thing to do is
+to remove the files manually. See this [stackoverflow question](https://stackoverflow.com/questions/402359/how-do-you-uninstall-a-python-package-that-was-installed-using-distutils)
+for example.
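+
+A quick way to find where the old install lives (a sketch; the printed path
+depends on your environment):
+
+```python
+# locate the installed fitsio package so stale files can be removed by hand
+import os
+import fitsio
+
+print(os.path.dirname(fitsio.__file__))
+```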
+
+### python 3 strings
+
+As of version `1.0.0`, fitsio supports Python 3 strings natively. This means
+that on Python 3, native strings are correctly read from and written to
+FITS files, and all byte string columns are treated as ASCII-encoded unicode
+strings. For FITS files written with a previous version of fitsio, the data
+in Python 3 will now come back as a string, not a byte string. Note that this
+is not full unicode support: internally, fitsio only supports the ASCII
+character set.
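+
+Below is a minimal sketch of the round-trip behavior described above (assumes
+fitsio >= 1.0.0 on Python 3; the filename is illustrative):
+
+```python
+import numpy as np
+import fitsio
+
+# a table with a fixed-width unicode string column
+data = np.zeros(2, dtype=[('name', 'U5')])
+data['name'] = ['alpha', 'beta']
+
+with fitsio.FITS('strings-example.fits', 'rw', clobber=True) as fits:
+    fits.write(data)
+    readback = fits[-1].read()
+
+# the values come back as native python 3 (unicode) strings
+assert readback['name'][0] == 'alpha'
+```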
+
+## TODO
+
+- HDU groups: does anyone use these? If so open an issue!
--- /dev/null
+LICENSE.txt
+MANIFEST.in
+README.md
+setup.py
+cfitsio-4.4.1-20240617/.gitignore
+cfitsio-4.4.1-20240617/CMakeLists.txt
+cfitsio-4.4.1-20240617/Makefile.in
+cfitsio-4.4.1-20240617/README.MacOS
+cfitsio-4.4.1-20240617/README.md
+cfitsio-4.4.1-20240617/README.win
+cfitsio-4.4.1-20240617/README_OLD.win
+cfitsio-4.4.1-20240617/buffers.c
+cfitsio-4.4.1-20240617/cfileio.c
+cfitsio-4.4.1-20240617/cfitsio-config-version.cmake.in
+cfitsio-4.4.1-20240617/cfitsio-config.cmake.in
+cfitsio-4.4.1-20240617/cfitsio.pc.cmake
+cfitsio-4.4.1-20240617/cfitsio.pc.in
+cfitsio-4.4.1-20240617/cfitsio_mac.sit.hqx
+cfitsio-4.4.1-20240617/cfortran.h
+cfitsio-4.4.1-20240617/checksum.c
+cfitsio-4.4.1-20240617/config.guess
+cfitsio-4.4.1-20240617/config.sub
+cfitsio-4.4.1-20240617/configure
+cfitsio-4.4.1-20240617/configure.in
+cfitsio-4.4.1-20240617/drvrfile.c
+cfitsio-4.4.1-20240617/drvrgsiftp.c
+cfitsio-4.4.1-20240617/drvrgsiftp.h
+cfitsio-4.4.1-20240617/drvrmem.c
+cfitsio-4.4.1-20240617/drvrnet.c
+cfitsio-4.4.1-20240617/drvrsmem.c
+cfitsio-4.4.1-20240617/drvrsmem.h
+cfitsio-4.4.1-20240617/editcol.c
+cfitsio-4.4.1-20240617/edithdu.c
+cfitsio-4.4.1-20240617/eval.l
+cfitsio-4.4.1-20240617/eval.y
+cfitsio-4.4.1-20240617/eval_defs.h
+cfitsio-4.4.1-20240617/eval_f.c
+cfitsio-4.4.1-20240617/eval_l.c
+cfitsio-4.4.1-20240617/eval_tab.h
+cfitsio-4.4.1-20240617/eval_y.c
+cfitsio-4.4.1-20240617/f77.inc
+cfitsio-4.4.1-20240617/f77_wrap.h
+cfitsio-4.4.1-20240617/f77_wrap1.c
+cfitsio-4.4.1-20240617/f77_wrap2.c
+cfitsio-4.4.1-20240617/f77_wrap3.c
+cfitsio-4.4.1-20240617/f77_wrap4.c
+cfitsio-4.4.1-20240617/fits_hcompress.c
+cfitsio-4.4.1-20240617/fits_hdecompress.c
+cfitsio-4.4.1-20240617/fitscore.c
+cfitsio-4.4.1-20240617/fitsio.h
+cfitsio-4.4.1-20240617/fitsio2.h
+cfitsio-4.4.1-20240617/getcol.c
+cfitsio-4.4.1-20240617/getcolb.c
+cfitsio-4.4.1-20240617/getcold.c
+cfitsio-4.4.1-20240617/getcole.c
+cfitsio-4.4.1-20240617/getcoli.c
+cfitsio-4.4.1-20240617/getcolj.c
+cfitsio-4.4.1-20240617/getcolk.c
+cfitsio-4.4.1-20240617/getcoll.c
+cfitsio-4.4.1-20240617/getcols.c
+cfitsio-4.4.1-20240617/getcolsb.c
+cfitsio-4.4.1-20240617/getcolui.c
+cfitsio-4.4.1-20240617/getcoluj.c
+cfitsio-4.4.1-20240617/getcoluk.c
+cfitsio-4.4.1-20240617/getkey.c
+cfitsio-4.4.1-20240617/group.c
+cfitsio-4.4.1-20240617/group.h
+cfitsio-4.4.1-20240617/grparser.c
+cfitsio-4.4.1-20240617/grparser.h
+cfitsio-4.4.1-20240617/histo.c
+cfitsio-4.4.1-20240617/imcompress.c
+cfitsio-4.4.1-20240617/install-sh
+cfitsio-4.4.1-20240617/iraffits.c
+cfitsio-4.4.1-20240617/iter_a.c
+cfitsio-4.4.1-20240617/iter_a.f
+cfitsio-4.4.1-20240617/iter_a.fit
+cfitsio-4.4.1-20240617/iter_b.c
+cfitsio-4.4.1-20240617/iter_b.f
+cfitsio-4.4.1-20240617/iter_b.fit
+cfitsio-4.4.1-20240617/iter_c.c
+cfitsio-4.4.1-20240617/iter_c.f
+cfitsio-4.4.1-20240617/iter_c.fit
+cfitsio-4.4.1-20240617/iter_image.c
+cfitsio-4.4.1-20240617/iter_var.c
+cfitsio-4.4.1-20240617/longnam.h
+cfitsio-4.4.1-20240617/modkey.c
+cfitsio-4.4.1-20240617/pliocomp.c
+cfitsio-4.4.1-20240617/putcol.c
+cfitsio-4.4.1-20240617/putcolb.c
+cfitsio-4.4.1-20240617/putcold.c
+cfitsio-4.4.1-20240617/putcole.c
+cfitsio-4.4.1-20240617/putcoli.c
+cfitsio-4.4.1-20240617/putcolj.c
+cfitsio-4.4.1-20240617/putcolk.c
+cfitsio-4.4.1-20240617/putcoll.c
+cfitsio-4.4.1-20240617/putcols.c
+cfitsio-4.4.1-20240617/putcolsb.c
+cfitsio-4.4.1-20240617/putcolu.c
+cfitsio-4.4.1-20240617/putcolui.c
+cfitsio-4.4.1-20240617/putcoluj.c
+cfitsio-4.4.1-20240617/putcoluk.c
+cfitsio-4.4.1-20240617/putkey.c
+cfitsio-4.4.1-20240617/quantize.c
+cfitsio-4.4.1-20240617/region.c
+cfitsio-4.4.1-20240617/region.h
+cfitsio-4.4.1-20240617/ricecomp.c
+cfitsio-4.4.1-20240617/sample.tpl
+cfitsio-4.4.1-20240617/scalnull.c
+cfitsio-4.4.1-20240617/simplerng.c
+cfitsio-4.4.1-20240617/simplerng.h
+cfitsio-4.4.1-20240617/swapproc.c
+cfitsio-4.4.1-20240617/testf77.out
+cfitsio-4.4.1-20240617/testf77.std
+cfitsio-4.4.1-20240617/testprog.out
+cfitsio-4.4.1-20240617/testprog.std
+cfitsio-4.4.1-20240617/testprog.tpt
+cfitsio-4.4.1-20240617/vmsieee.c
+cfitsio-4.4.1-20240617/wcssub.c
+cfitsio-4.4.1-20240617/wcsutil.c
+cfitsio-4.4.1-20240617/winDumpExts.mak
+cfitsio-4.4.1-20240617/windumpexts.c
+cfitsio-4.4.1-20240617/zcompress.c
+cfitsio-4.4.1-20240617/zuncompress.c
+cfitsio-4.4.1-20240617/cfitsio.xcodeproj/project.pbxproj
+cfitsio-4.4.1-20240617/docs/cfitsio.pdf
+cfitsio-4.4.1-20240617/docs/cfitsio.ps
+cfitsio-4.4.1-20240617/docs/cfitsio.tex
+cfitsio-4.4.1-20240617/docs/cfitsio.toc
+cfitsio-4.4.1-20240617/docs/cfortran.doc
+cfitsio-4.4.1-20240617/docs/changes.txt
+cfitsio-4.4.1-20240617/docs/fitsio.pdf
+cfitsio-4.4.1-20240617/docs/fitsio.ps
+cfitsio-4.4.1-20240617/docs/fitsio.tex
+cfitsio-4.4.1-20240617/docs/fitsio.toc
+cfitsio-4.4.1-20240617/docs/fpackguide.odt
+cfitsio-4.4.1-20240617/docs/fpackguide.pdf
+cfitsio-4.4.1-20240617/docs/quick.pdf
+cfitsio-4.4.1-20240617/docs/quick.ps
+cfitsio-4.4.1-20240617/docs/quick.tex
+cfitsio-4.4.1-20240617/docs/quick.toc
+cfitsio-4.4.1-20240617/licenses/License.txt
+cfitsio-4.4.1-20240617/utilities/cookbook.c
+cfitsio-4.4.1-20240617/utilities/cookbook.f
+cfitsio-4.4.1-20240617/utilities/fitscopy.c
+cfitsio-4.4.1-20240617/utilities/fitsverify.c
+cfitsio-4.4.1-20240617/utilities/fpack.c
+cfitsio-4.4.1-20240617/utilities/fpack.h
+cfitsio-4.4.1-20240617/utilities/fpackutil.c
+cfitsio-4.4.1-20240617/utilities/ftverify.c
+cfitsio-4.4.1-20240617/utilities/funpack.c
+cfitsio-4.4.1-20240617/utilities/fverify.h
+cfitsio-4.4.1-20240617/utilities/fvrf_data.c
+cfitsio-4.4.1-20240617/utilities/fvrf_file.c
+cfitsio-4.4.1-20240617/utilities/fvrf_head.c
+cfitsio-4.4.1-20240617/utilities/fvrf_key.c
+cfitsio-4.4.1-20240617/utilities/fvrf_misc.c
+cfitsio-4.4.1-20240617/utilities/imcopy.c
+cfitsio-4.4.1-20240617/utilities/smem.c
+cfitsio-4.4.1-20240617/utilities/speed.c
+cfitsio-4.4.1-20240617/utilities/testf77.f
+cfitsio-4.4.1-20240617/utilities/testprog.c
+fitsio/__init__.py
+fitsio/fits_exceptions.py
+fitsio/fitsio_pywrap.c
+fitsio/fitslib.py
+fitsio/header.py
+fitsio/util.py
+fitsio.egg-info/PKG-INFO
+fitsio.egg-info/SOURCES.txt
+fitsio.egg-info/dependency_links.txt
+fitsio.egg-info/requires.txt
+fitsio.egg-info/top_level.txt
+fitsio/hdu/__init__.py
+fitsio/hdu/base.py
+fitsio/hdu/image.py
+fitsio/hdu/table.py
+fitsio/test_images/test_gzip_compressed_image.fits.fz
+fitsio/tests/__init__.py
+fitsio/tests/checks.py
+fitsio/tests/makedata.py
+fitsio/tests/test_empty_slice.py
+fitsio/tests/test_header.py
+fitsio/tests/test_header_junk.py
+fitsio/tests/test_image.py
+fitsio/tests/test_image_compression.py
+fitsio/tests/test_lib.py
+fitsio/tests/test_table.py
+fitsio/tests/test_warnings.py
+patches/Makefile.in.patch
+patches/README.md
+patches/build_cfitsio_patches.py
+patches/configure.in.patch
+patches/configure.patch
+patches/fitscore.c.patch
+patches/fitsio.h.patch
+patches/fitsio2.h.patch
+patches/putcols.c.patch
+zlib/LICENSE
+zlib/adler32.c
+zlib/crc32.c
+zlib/crc32.h
+zlib/deflate.c
+zlib/deflate.h
+zlib/infback.c
+zlib/inffast.c
+zlib/inffast.h
+zlib/inffixed.h
+zlib/inflate.c
+zlib/inflate.h
+zlib/inftrees.c
+zlib/inftrees.h
+zlib/trees.c
+zlib/trees.h
+zlib/uncompr.c
+zlib/zconf.h
+zlib/zlib.h
+zlib/zutil.c
+zlib/zutil.h
\ No newline at end of file
--- /dev/null
+# flake8: noqa
+"""
+A python library to read and write data to FITS files using cfitsio.
+See the docs at https://github.com/esheldon/fitsio for example
+usage.
+"""
+
+__version__ = '1.2.5'
+
+from . import fitslib
+
+from .fitslib import (
+ FITS,
+ read,
+ read_header,
+ read_scamp_head,
+ write,
+ READONLY,
+ READWRITE,
+
+ NOCOMPRESS,
+ RICE_1,
+ GZIP_1,
+ GZIP_2,
+ PLIO_1,
+ HCOMPRESS_1,
+
+ NO_DITHER,
+ SUBTRACTIVE_DITHER_1,
+ SUBTRACTIVE_DITHER_2,
+)
+
+from .header import FITSHDR, FITSRecord, FITSCard
+from .hdu import BINARY_TBL, ASCII_TBL, IMAGE_HDU
+
+from . import util
+from .util import cfitsio_version, FITSRuntimeWarning
+
+from .fits_exceptions import FITSFormatError
--- /dev/null
+class FITSFormatError(Exception):
+ """
+ Format error in FITS file
+ """
+ def __init__(self, value):
+ super(FITSFormatError, self).__init__(value)
+ self.value = value
+
+ def __str__(self):
+ return str(self.value)
--- /dev/null
+/*
+ * fitsio_pywrap.c
+ *
+ * This is a CPython wrapper for the cfitsio library.
+
+ Copyright (C) 2011 Erin Sheldon, BNL. erin dot sheldon at gmail dot com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include <string.h>
+#include <Python.h>
+#include "fitsio.h"
+#include "fitsio2.h"
+//#include "fitsio_pywrap_lists.h"
+#include <numpy/arrayobject.h>
+
+// this is not defined anywhere in cfitsio except in
+// the fits file structure
+#define CFITSIO_MAX_ARRAY_DIMS 99
+
+// not sure where this is defined in numpy...
+#define NUMPY_MAX_DIMS 32
+
+struct PyFITSObject {
+ PyObject_HEAD
+ fitsfile* fits;
+};
+
+#ifdef FITSIO_PYWRAP_ALWAYS_NONSTANDARD_STRINGS
+static int fits_use_standard_strings(void)
+{
+ return 0;
+}
+#else
+#ifndef _FITSIO_H_FITS_USE_STANDARD_STRINGS
+#define _FITSIO_H_FITS_USE_STANDARD_STRINGS
+int CFITS_API fits_use_standard_strings(void);
+#endif
+#endif
+
+// check unicode for python3, string for python2
+static
+int is_python_string(const PyObject* obj)
+{
+#if PY_MAJOR_VERSION >= 3
+ return PyUnicode_Check(obj) || PyBytes_Check(obj);
+#else
+ return PyUnicode_Check(obj) || PyString_Check(obj);
+#endif
+}
+
+
+/*
+ Ensure all elements of the null terminated string are ascii, replacing
+ non-ascii characters with a ?
+*/
+
+static void convert_to_ascii(char* str) {
+ size_t size=0, i=0;
+ int cval=0;
+
+ size = strlen(str);
+ for (i=0; i < size; i++) {
+ cval = (int)str[i];
+ if (cval < 0 || cval > 127) {
+ str[i] = '?';
+ }
+ }
+}
+
+/*
+ Replace non ascii characters with _
+*/
+static int convert_extname_to_ascii(char* str) {
+ int was_converted=0;
+ size_t size=0, i=0;
+ int cval=0;
+
+ size = strlen(str);
+ for (i=0; i < size; i++) {
+ cval = (int)str[i];
+
+ if (cval < 0 || cval > 127) {
+ was_converted = 1;
+ str[i] = '_';
+ }
+ }
+ return was_converted;
+}
+
+
+/*
+ Replace bad keyword characters (?, *, #) or non-ascii with valid _ characters.
+*/
+static int convert_keyword_to_allowed_ascii_template_and_nonascii_only(char *str) {
+ int isbad=0, was_converted=0;
+ size_t size=0, i=0;
+ int cval=0;
+
+ size = strlen(str);
+ for (i=0; i < size; i++) {
+ cval = (int)str[i];
+
+ isbad = (cval == '?') || (cval == '*') || (cval == '#');
+
+ if (isbad || cval < 32 || cval > 126) {
+ was_converted = 1;
+ str[i] = '_';
+ }
+ }
+ return was_converted;
+
+}
+
+/*
+return 1 if a keyword has non-standard FITS keyword characters.
+*/
+static int has_invalid_keyword_chars(char *str) {
+ int isbad=0;
+ size_t size=0, i=0;
+ int cval=0;
+
+ size = strlen(str);
+ for (i=0; i < size; i++) {
+ cval = (int)str[i];
+
+ isbad = !(
+ (cval >= 'A' && cval <= 'Z')
+ ||
+ (cval >= 'a' && cval <= 'z')
+ ||
+ (cval >= '0' && cval <= '9')
+ ||
+ (cval == '-')
+ ||
+ (cval == '_')
+ );
+ if (isbad) {
+ // a single invalid character makes the whole keyword invalid
+ break;
+ }
+ }
+ return isbad;
+}
+
+/*
+
+ get a string version of the object. New memory
+ is allocated and the receiver must clean it up.
+
+*/
+
+// unicode is common to python 2 and 3
+static char* get_unicode_as_string(PyObject* obj)
+{
+ PyObject* tmp=NULL;
+ char* strdata=NULL;
+ tmp = PyObject_CallMethod(obj,"encode",NULL);
+
+ strdata = strdup( PyBytes_AsString(tmp) );
+ Py_XDECREF(tmp);
+
+ return strdata;
+}
+
+static char* get_object_as_string(PyObject* obj)
+{
+ PyObject* format=NULL;
+ PyObject* args=NULL;
+ char* strdata=NULL;
+ PyObject* tmpobj1=NULL;
+
+ if (PyUnicode_Check(obj)) {
+
+ strdata=get_unicode_as_string(obj);
+
+ } else {
+
+#if PY_MAJOR_VERSION >= 3
+
+ if (PyBytes_Check(obj)) {
+ strdata = strdup( PyBytes_AsString(obj) );
+ } else {
+ PyObject* tmpobj2=NULL;
+ format = Py_BuildValue("s","%s");
+ // this is not a string object
+ args=PyTuple_New(1);
+
+ // PyTuple_SetItem steals a reference, so take one first to
+ // protect the caller's reference to obj
+ Py_XINCREF(obj);
+ PyTuple_SetItem(args,0,obj);
+ tmpobj2 = PyUnicode_Format(format, args);
+ tmpobj1 = PyObject_CallMethod(tmpobj2,"encode",NULL);
+
+ Py_XDECREF(args);
+ Py_XDECREF(tmpobj2);
+
+ strdata = strdup( PyBytes_AsString(tmpobj1) );
+ Py_XDECREF(tmpobj1);
+ Py_XDECREF(format);
+ }
+
+#else
+ // convert to a string as needed
+ if (PyString_Check(obj)) {
+ strdata = strdup( PyString_AsString(obj) );
+ } else {
+ format = Py_BuildValue("s","%s");
+ args=PyTuple_New(1);
+
+ // PyTuple_SetItem steals a reference, so take one first to
+ // protect the caller's reference to obj
+ Py_XINCREF(obj);
+ PyTuple_SetItem(args,0,obj);
+ tmpobj1= PyString_Format(format, args);
+
+ strdata = strdup( PyString_AsString(tmpobj1) );
+ Py_XDECREF(args);
+ Py_XDECREF(tmpobj1);
+ Py_XDECREF(format);
+ }
+#endif
+ }
+
+ return strdata;
+}
+
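+// convert a non-zero cfitsio status into a python IOError, including any
+// messages remaining on the cfitsio error stack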
+static void set_ioerr_string_from_status(int status) {
+ char status_str[FLEN_STATUS], errmsg[FLEN_ERRMSG];
+ char message[1024];
+
+ int nleft=1024;
+
+ if (status) {
+ fits_get_errstatus(status, status_str); /* get the error description */
+
+ sprintf(message, "FITSIO status = %d: %s\n", status, status_str);
+
+ nleft -= strlen(status_str)+1;
+
+ while ( nleft > 0 && fits_read_errmsg(errmsg) ) { /* get error stack messages */
+ strncat(message, errmsg, nleft-1);
+ nleft -= strlen(errmsg)+1;
+ if (nleft >= 2) {
+ strncat(message, "\n", nleft-1);
+ }
+ nleft-=2;
+ }
+ PyErr_SetString(PyExc_IOError, message);
+ }
+ return;
+}
+
+/*
+ string list helper functions
+*/
+
+struct stringlist {
+ size_t size;
+ char** data;
+};
+
+static struct stringlist* stringlist_new(void) {
+ struct stringlist* slist=NULL;
+
+ slist = malloc(sizeof(struct stringlist));
+ slist->size = 0;
+ slist->data=NULL;
+ return slist;
+}
+// push a copy of the string onto the string list
+static void stringlist_push(struct stringlist* slist, const char* str) {
+ size_t newsize=0;
+ size_t i=0;
+
+ newsize = slist->size+1;
+ slist->data = realloc(slist->data, sizeof(char*)*newsize);
+ slist->size += 1;
+
+ i = slist->size-1;
+
+ slist->data[i] = strdup(str);
+}
+
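+// push a newly allocated, zero-filled string of capacity slen onto the list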
+static void stringlist_push_size(struct stringlist* slist, size_t slen) {
+ size_t newsize=0;
+ size_t i=0;
+
+ newsize = slist->size+1;
+ slist->data = realloc(slist->data, sizeof(char*)*newsize);
+ slist->size += 1;
+
+ i = slist->size-1;
+
+ slist->data[i] = calloc(slen+1,sizeof(char));
+ //slist->data[i] = malloc(sizeof(char)*(slen+1));
+ //memset(slist->data[i], 0, slen+1);
+}
+static struct stringlist* stringlist_delete(struct stringlist* slist) {
+ if (slist != NULL) {
+ size_t i=0;
+ if (slist->data != NULL) {
+ for (i=0; i < slist->size; i++) {
+ free(slist->data[i]);
+ }
+ }
+ free(slist->data);
+ free(slist);
+ }
+ return NULL;
+}
+
+
+/*
+static void stringlist_print(struct stringlist* slist) {
+ size_t i=0;
+ if (slist == NULL) {
+ return;
+ }
+ for (i=0; i<slist->size; i++) {
+ printf(" slist[%ld]: %s\n", i, slist->data[i]);
+ }
+}
+*/
+
+
+static int stringlist_addfrom_listobj(struct stringlist* slist,
+ PyObject* listObj,
+ const char* listname) {
+ size_t size=0, i=0;
+ char* tmpstr=NULL;
+
+ if (!PyList_Check(listObj)) {
+ PyErr_Format(PyExc_ValueError, "Expected a list for %s.", listname);
+ return 1;
+ }
+ size = PyList_Size(listObj);
+
+ for (i=0; i<size; i++) {
+ PyObject* tmp = PyList_GetItem(listObj, i);
+ if (!is_python_string(tmp)) {
+ PyErr_Format(PyExc_ValueError,
+ "Expected only strings in %s list.", listname);
+ return 1;
+ }
+ tmpstr = get_object_as_string(tmp);
+ stringlist_push(slist, tmpstr);
+ free(tmpstr);
+ }
+ return 0;
+}
+
+static
+void add_double_to_dict(PyObject* dict, const char* key, double value) {
+ PyObject* tobj=NULL;
+ tobj=PyFloat_FromDouble(value);
+ PyDict_SetItemString(dict, key, tobj);
+ Py_XDECREF(tobj);
+}
+
+static
+void add_long_to_dict(PyObject* dict, const char* key, long value) {
+ PyObject* tobj=NULL;
+ tobj=PyLong_FromLong(value);
+ PyDict_SetItemString(dict, key, tobj);
+ Py_XDECREF(tobj);
+}
+
+static
+void add_long_long_to_dict(PyObject* dict, const char* key, long long value) {
+ PyObject* tobj=NULL;
+ tobj=PyLong_FromLongLong(value);
+ PyDict_SetItemString(dict, key, tobj);
+ Py_XDECREF(tobj);
+}
+
+static
+void add_string_to_dict(PyObject* dict, const char* key, const char* str) {
+ PyObject* tobj=NULL;
+ tobj=Py_BuildValue("s",str);
+ PyDict_SetItemString(dict, key, tobj);
+ Py_XDECREF(tobj);
+}
+
+static
+void add_none_to_dict(PyObject* dict, const char* key) {
+ PyDict_SetItemString(dict, key, Py_None);
+ Py_XINCREF(Py_None);
+}
+static
+void add_true_to_dict(PyObject* dict, const char* key) {
+ PyDict_SetItemString(dict, key, Py_True);
+ Py_XINCREF(Py_True);
+}
+static
+void add_false_to_dict(PyObject* dict, const char* key) {
+ PyDict_SetItemString(dict, key, Py_False);
+ Py_XINCREF(Py_False);
+}
+
+
+/*
+static
+void append_long_to_list(PyObject* list, long value) {
+ PyObject* tobj=NULL;
+ tobj=PyLong_FromLong(value);
+ PyList_Append(list, tobj);
+ Py_XDECREF(tobj);
+}
+*/
+
+static
+void append_long_long_to_list(PyObject* list, long long value) {
+ PyObject* tobj=NULL;
+ tobj=PyLong_FromLongLong(value);
+ PyList_Append(list, tobj);
+ Py_XDECREF(tobj);
+}
+
+/*
+static
+void append_string_to_list(PyObject* list, const char* str) {
+ PyObject* tobj=NULL;
+ tobj=Py_BuildValue("s",str);
+ PyList_Append(list, tobj);
+ Py_XDECREF(tobj);
+}
+*/
+
+
+
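+// open the file with the given cfitsio mode, creating it first if the
+// create flag is set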
+static int
+PyFITSObject_init(struct PyFITSObject* self, PyObject *args, PyObject *kwds)
+{
+ char* filename;
+ int mode;
+ int status=0;
+ int create=0;
+
+ if (!PyArg_ParseTuple(args, (char*)"sii", &filename, &mode, &create)) {
+ return -1;
+ }
+
+ if (create) {
+ // create and open
+ if (fits_create_file(&self->fits, filename, &status)) {
+ set_ioerr_string_from_status(status);
+ return -1;
+ }
+ } else {
+ if (fits_open_file(&self->fits, filename, mode, &status)) {
+ set_ioerr_string_from_status(status);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+static PyObject *
+PyFITSObject_repr(struct PyFITSObject* self) {
+
+ if (self->fits != NULL) {
+ int status=0;
+ char filename[FLEN_FILENAME];
+ char repr[2056];
+
+ if (fits_file_name(self->fits, filename, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ sprintf(repr, "fits file: %s", filename);
+ return Py_BuildValue("s",repr);
+ } else {
+ return Py_BuildValue("s","none");
+ }
+}
+
+static PyObject *
+PyFITSObject_filename(struct PyFITSObject* self) {
+
+ if (self->fits != NULL) {
+ int status=0;
+ char filename[FLEN_FILENAME];
+ PyObject* fnameObj=NULL;
+ if (fits_file_name(self->fits, filename, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ fnameObj = Py_BuildValue("s",filename);
+ return fnameObj;
+ } else {
+ PyErr_SetString(PyExc_ValueError, "file is not open, cannot determine name");
+ return NULL;
+ }
+}
+
+
+
+static PyObject *
+PyFITSObject_close(struct PyFITSObject* self)
+{
+ int status=0;
+ if (fits_close_file(self->fits, &status)) {
+ self->fits=NULL;
+ /*
+ set_ioerr_string_from_status(status);
+ return NULL;
+ */
+ }
+ self->fits=NULL;
+ Py_RETURN_NONE;
+}
+
+
+
+static void
+PyFITSObject_dealloc(struct PyFITSObject* self)
+{
+ int status=0;
+ fits_close_file(self->fits, &status);
+#if PY_MAJOR_VERSION >= 3
+ // introduced in python 2.6
+ Py_TYPE(self)->tp_free((PyObject*)self);
+#else
+ // old way, removed in python 3
+ self->ob_type->tp_free((PyObject*)self);
+#endif
+}
+
+
+// this will need to be updated for array string columns.
+// I'm using a tcolumn* here, could cause problems
+static long get_groupsize(tcolumn* colptr) {
+ long gsize=0;
+ if (colptr->tdatatype == TSTRING) {
+ //gsize = colptr->twidth;
+ gsize = colptr->trepeat;
+ } else {
+ gsize = colptr->twidth*colptr->trepeat;
+ }
+ return gsize;
+}
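+// get a pointer to the data in a contiguous int64 numpy array, storing the
+// number of elements in ncols; returns NULL with a python exception set
+// on error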
+static npy_int64* get_int64_from_array(PyArrayObject* arr, npy_intp* ncols) {
+
+ npy_int64* colnums;
+ int npy_type=0, check=0;
+
+ if (!PyArray_Check(arr)) {
+ PyErr_SetString(PyExc_TypeError, "int64 array must be an array.");
+ return NULL;
+ }
+
+ npy_type = PyArray_TYPE(arr);
+
+ // on some platforms, creating an 'i8' array gives it a longlong
+ // dtype. Just make sure it is 8 bytes
+ check=
+ (npy_type == NPY_INT64)
+ |
+ (npy_type==NPY_LONGLONG && sizeof(npy_longlong)==sizeof(npy_int64));
+ if (!check) {
+ PyErr_Format(PyExc_TypeError,
+ "array must be an int64 array (%d), got %d.",
+ NPY_INT64,npy_type);
+ return NULL;
+ }
+ if (!PyArray_ISCONTIGUOUS(arr)) {
+ PyErr_SetString(PyExc_TypeError, "int64 array must be a contiguous.");
+ return NULL;
+ }
+
+ colnums = PyArray_DATA(arr);
+ *ncols = PyArray_SIZE(arr);
+
+ return colnums;
+}
+
+// move hdu by name and possibly version, return the hdu number
+static PyObject *
+PyFITSObject_movnam_hdu(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdutype=ANY_HDU; // means we don't care if it's an image or a table
+ char* extname=NULL;
+ int extver=0; // zero means it is ignored
+ int hdunum=0;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, (char*)"isi", &hdutype, &extname, &extver)) {
+ return NULL;
+ }
+
+ if (fits_movnam_hdu(self->fits, hdutype, extname, extver, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ fits_get_hdu_num(self->fits, &hdunum);
+ return PyLong_FromLong((long)hdunum);
+}
+
+
+
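+// move to the specified HDU number (1-offset) and return the HDU type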
+static PyObject *
+PyFITSObject_movabs_hdu(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0, hdutype=0;
+ int status=0;
+ PyObject* hdutypeObj=NULL;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+ return NULL;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ hdutypeObj = PyLong_FromLong((long)hdutype);
+ return hdutypeObj;
+}
+
+// get info for the specified HDU
+static PyObject *
+PyFITSObject_get_hdu_info(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0, hdutype=0, ext=0, ignore_scaling=FALSE;
+ int status=0, tstatus=0, is_compressed=0;
+ PyObject* dict=NULL;
+
+ char extname[FLEN_VALUE];
+ char hduname[FLEN_VALUE];
+ int extver=0, hduver=0;
+
+ long long header_start;
+ long long data_start;
+ long long data_end;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, (char*)"ii", &hdunum, &ignore_scaling)) {
+ return NULL;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (ignore_scaling == TRUE
+ && fits_set_bscale(self->fits, 1.0, 0.0, &status)) {
+ // set the python exception before bailing out
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ dict = PyDict_New();
+ ext=hdunum-1;
+
+ add_long_to_dict(dict, "hdunum", (long)hdunum);
+ add_long_to_dict(dict, "extnum", (long)ext);
+ add_long_to_dict(dict, "hdutype", (long)hdutype);
+
+
+ tstatus=0;
+ if (fits_read_key(self->fits, TSTRING, "EXTNAME", extname, NULL, &tstatus)==0) {
+ convert_extname_to_ascii(extname);
+ add_string_to_dict(dict, "extname", extname);
+ } else {
+ add_string_to_dict(dict, "extname", "");
+ }
+
+ tstatus=0;
+ if (fits_read_key(self->fits, TSTRING, "HDUNAME", hduname, NULL, &tstatus)==0) {
+ convert_extname_to_ascii(hduname);
+ add_string_to_dict(dict, "hduname", hduname);
+ } else {
+ add_string_to_dict(dict, "hduname", "");
+ }
+
+ tstatus=0;
+ if (fits_read_key(self->fits, TINT, "EXTVER", &extver, NULL, &tstatus)==0) {
+ add_long_to_dict(dict, "extver", (long)extver);
+ } else {
+ add_long_to_dict(dict, "extver", (long)0);
+ }
+
+ tstatus=0;
+ if (fits_read_key(self->fits, TINT, "HDUVER", &hduver, NULL, &tstatus)==0) {
+ add_long_to_dict(dict, "hduver", (long)hduver);
+ } else {
+ add_long_to_dict(dict, "hduver", (long)0);
+ }
+
+ tstatus=0;
+ is_compressed=fits_is_compressed_image(self->fits, &tstatus);
+ add_long_to_dict(dict, "is_compressed_image", (long)is_compressed);
+
+
+ // get byte offsets
+ if (0==fits_get_hduaddrll(self->fits, &header_start, &data_start, &data_end, &tstatus)) {
+ add_long_long_to_dict(dict, "header_start", (long)header_start);
+ add_long_long_to_dict(dict, "data_start", (long)data_start);
+ add_long_long_to_dict(dict, "data_end", (long)data_end);
+ } else {
+ add_long_long_to_dict(dict, "header_start", -1);
+ add_long_long_to_dict(dict, "data_start", -1);
+ add_long_long_to_dict(dict, "data_end", -1);
+ }
+
+
+ int ndims=0;
+ int maxdim=CFITSIO_MAX_ARRAY_DIMS;
+ LONGLONG dims[CFITSIO_MAX_ARRAY_DIMS];
+ if (hdutype == IMAGE_HDU) {
+ // move this into its own func
+ int tstatus=0;
+ int bitpix=0;
+ int bitpix_equiv=0;
+ char comptype[20];
+ PyObject* dimsObj=PyList_New(0);
+ int i=0;
+
+ //if (fits_read_imghdrll(self->fits, maxdim, simple_p, &bitpix, &ndims,
+ // dims, pcount_p, gcount_p, extend_p, &status)) {
+ if (fits_get_img_paramll(self->fits, maxdim, &bitpix, &ndims, dims, &tstatus)) {
+ add_string_to_dict(dict,"error","could not determine image parameters");
+ } else {
+ add_long_to_dict(dict,"ndims",(long)ndims);
+ add_long_to_dict(dict,"img_type",(long)bitpix);
+
+ if (ignore_scaling == TRUE) {
+ // Get the raw type if scaling is being ignored.
+ fits_get_img_type(self->fits, &bitpix_equiv, &status);
+ } else {
+ fits_get_img_equivtype(self->fits, &bitpix_equiv, &status);
+ }
+
+ add_long_to_dict(dict,"img_equiv_type",(long)bitpix_equiv);
+
+ tstatus=0;
+ if (fits_read_key(self->fits, TSTRING, "ZCMPTYPE",
+ comptype, NULL, &tstatus)==0) {
+ convert_to_ascii(comptype);
+ add_string_to_dict(dict,"comptype",comptype);
+ } else {
+ add_none_to_dict(dict,"comptype");
+ }
+
+ for (i=0; i<ndims; i++) {
+ append_long_long_to_list(dimsObj, (long long)dims[i]);
+ }
+ PyDict_SetItemString(dict, "dims", dimsObj);
+ Py_XDECREF(dimsObj);
+
+ }
+
+ } else if (hdutype == BINARY_TBL) {
+ int tstatus=0;
+ LONGLONG nrows=0;
+ int ncols=0;
+ PyObject* colinfo = PyList_New(0);
+ int i=0,j=0;
+
+ fits_get_num_rowsll(self->fits, &nrows, &tstatus);
+ fits_get_num_cols(self->fits, &ncols, &tstatus);
+ add_long_long_to_dict(dict,"nrows",(long long)nrows);
+ add_long_to_dict(dict,"ncols",(long)ncols);
+
+ {
+ PyObject* d = NULL;
+ tcolumn* col=NULL;
+ struct stringlist* names=NULL;
+ struct stringlist* tforms=NULL;
+ names=stringlist_new();
+ tforms=stringlist_new();
+
+ for (i=0; i<ncols; i++) {
+ stringlist_push_size(names, 70);
+ stringlist_push_size(tforms, 70);
+ }
+ // just get the names: no other way to do it!
+ fits_read_btblhdrll(self->fits, ncols, NULL, NULL,
+ names->data, tforms->data,
+ NULL, NULL, NULL, &tstatus);
+
+ for (i=0; i<ncols; i++) {
+ d = PyDict_New();
+ int type=0;
+ LONGLONG repeat=0;
+ LONGLONG width=0;
+
+ convert_to_ascii(names->data[i]);
+ add_string_to_dict(d,"name",names->data[i]);
+ convert_to_ascii(tforms->data[i]);
+ add_string_to_dict(d,"tform",tforms->data[i]);
+
+ fits_get_coltypell(self->fits, i+1, &type, &repeat, &width, &tstatus);
+ add_long_to_dict(d,"type",(long)type);
+ add_long_long_to_dict(d,"repeat",(long long)repeat);
+ add_long_long_to_dict(d,"width",(long long)width);
+
+ fits_get_eqcoltypell(self->fits,i+1,&type,&repeat,&width, &tstatus);
+ add_long_to_dict(d,"eqtype",(long)type);
+
+ tstatus=0;
+ if (fits_read_tdimll(self->fits, i+1, maxdim, &ndims, dims,
+ &tstatus)) {
+ add_none_to_dict(d,"tdim");
+ } else {
+ PyObject* dimsObj=PyList_New(0);
+ for (j=0; j<ndims; j++) {
+ append_long_long_to_list(dimsObj, (long long)dims[j]);
+ }
+
+ PyDict_SetItemString(d, "tdim", dimsObj);
+ Py_XDECREF(dimsObj);
+ }
+
+ // using the struct, could cause problems
+ // actually, we can use ffgcprll to get this info, but will
+ // be redundant with some others above
+ col = &self->fits->Fptr->tableptr[i];
+ add_double_to_dict(d,"tscale",col->tscale);
+ add_double_to_dict(d,"tzero",col->tzero);
+
+ PyList_Append(colinfo, d);
+ Py_XDECREF(d);
+ }
+ names=stringlist_delete(names);
+ tforms=stringlist_delete(tforms);
+
+ PyDict_SetItemString(dict, "colinfo", colinfo);
+ Py_XDECREF(colinfo);
+ }
+ } else {
+ int tstatus=0;
+ LONGLONG nrows=0;
+ int ncols=0;
+ PyObject* colinfo = PyList_New(0);
+ int i=0,j=0;
+
+ fits_get_num_rowsll(self->fits, &nrows, &tstatus);
+ fits_get_num_cols(self->fits, &ncols, &tstatus);
+ add_long_long_to_dict(dict,"nrows",(long long)nrows);
+ add_long_to_dict(dict,"ncols",(long)ncols);
+
+ {
+ tcolumn* col=NULL;
+ struct stringlist* names=NULL;
+ struct stringlist* tforms=NULL;
+ names=stringlist_new();
+ tforms=stringlist_new();
+
+ for (i=0; i<ncols; i++) {
+ stringlist_push_size(names, 70);
+ stringlist_push_size(tforms, 70);
+ }
+ // just get the names: no other way to do it!
+
+ // rowlen nrows
+ fits_read_atblhdrll(self->fits, ncols, NULL, NULL,
+ // tfields tbcol units
+ NULL, names->data, NULL, tforms->data, NULL,
+ // extname
+ NULL, &tstatus);
+
+
+
+ for (i=0; i<ncols; i++) {
+ PyObject* d = PyDict_New();
+ int type=0;
+ LONGLONG repeat=0;
+ LONGLONG width=0;
+
+ convert_to_ascii(names->data[i]);
+ add_string_to_dict(d,"name",names->data[i]);
+ convert_to_ascii(tforms->data[i]);
+ add_string_to_dict(d,"tform",tforms->data[i]);
+
+ fits_get_coltypell(self->fits, i+1, &type, &repeat, &width, &tstatus);
+ add_long_to_dict(d,"type",(long)type);
+ add_long_long_to_dict(d,"repeat",(long long)repeat);
+ add_long_long_to_dict(d,"width",(long long)width);
+
+ fits_get_eqcoltypell(self->fits, i+1, &type, &repeat, &width, &tstatus);
+ add_long_to_dict(d,"eqtype",(long)type);
+
+ tstatus=0;
+ if (fits_read_tdimll(self->fits, i+1, maxdim, &ndims, dims,
+ &tstatus)) {
+ add_none_to_dict(dict,"tdim");
+ } else {
+ PyObject* dimsObj=PyList_New(0);
+ for (j=0; j<ndims; j++) {
+ append_long_long_to_list(dimsObj, (long long)dims[j]);
+ }
+
+ PyDict_SetItemString(d, "tdim", dimsObj);
+ Py_XDECREF(dimsObj);
+ }
+
+ // using the struct, could cause problems
+ // actually, we can use ffgcprll to get this info, but will
+ // be redundant with some others above
+ col = &self->fits->Fptr->tableptr[i];
+ add_double_to_dict(d,"tscale",col->tscale);
+ add_double_to_dict(d,"tzero",col->tzero);
+
+ PyList_Append(colinfo, d);
+ Py_XDECREF(d);
+ }
+ names=stringlist_delete(names);
+ tforms=stringlist_delete(tforms);
+
+ PyDict_SetItemString(dict, "colinfo", colinfo);
+ Py_XDECREF(colinfo);
+ }
+
+ }
+ return dict;
+}
+
+// get info for the specified HDU
+static PyObject *
+PyFITSObject_get_hdu_name_version(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0, hdutype=0;
+ int status=0;
+
+ char extname[FLEN_VALUE];
+ int extver=0;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+ return NULL;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ status=0;
+ if (fits_read_key(self->fits, TINT, "EXTVER", &extver, NULL, &status)!=0) {
+ extver=0;
+ }
+
+ status=0;
+ if (fits_read_key(self->fits, TSTRING, "EXTNAME", extname, NULL, &status)==0) {
+ return Py_BuildValue("si", extname, extver);
+ } else {
+ return Py_BuildValue("si", "", extver);
+ }
+}
+
+
+// this is the parameter that goes in the type for fits_write_col
+static int
+npy_to_fits_table_type(int npy_dtype, int write_bitcols) {
+
+ char mess[255];
+ switch (npy_dtype) {
+ case NPY_BOOL:
+ if (write_bitcols) {
+ return TBIT;
+ } else {
+ return TLOGICAL;
+ }
+ case NPY_UINT8:
+ return TBYTE;
+ case NPY_INT8:
+ return TSBYTE;
+ case NPY_UINT16:
+ return TUSHORT;
+ case NPY_INT16:
+ return TSHORT;
+ case NPY_UINT32:
+ if (sizeof(unsigned int) == sizeof(npy_uint32)) {
+ return TUINT;
+ } else if (sizeof(unsigned long) == sizeof(npy_uint32)) {
+ return TULONG;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine 4 byte unsigned integer type");
+ return -9999;
+ }
+ case NPY_INT32:
+ if (sizeof(int) == sizeof(npy_int32)) {
+ return TINT;
+ } else if (sizeof(long) == sizeof(npy_int32)) {
+ return TLONG;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine 4 byte integer type");
+ return -9999;
+ }
+
+ case NPY_INT64:
+ if (sizeof(long long) == sizeof(npy_int64)) {
+ return TLONGLONG;
+ } else if (sizeof(long) == sizeof(npy_int64)) {
+ return TLONG;
+ } else if (sizeof(int) == sizeof(npy_int64)) {
+ return TINT;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine 8 byte integer type");
+ return -9999;
+ }
+
+
+ case NPY_FLOAT32:
+ return TFLOAT;
+ case NPY_FLOAT64:
+ return TDOUBLE;
+
+ case NPY_COMPLEX64:
+ return TCOMPLEX;
+ case NPY_COMPLEX128:
+ return TDBLCOMPLEX;
+
+ case NPY_STRING:
+ return TSTRING;
+
+ case NPY_UINT64:
+ PyErr_SetString(PyExc_TypeError, "Unsigned 8 byte integer images are not supported by the FITS standard");
+ return -9999;
+
+ default:
+ sprintf(mess,"Unsupported numpy table datatype %d", npy_dtype);
+ PyErr_SetString(PyExc_TypeError, mess);
+ return -9999;
+ }
+
+ return 0;
+}
+
+
+
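+// map a numpy dtype to the fits image type (BITPIX value) and the datatype
+// code used when writing the pixel data; returns 1 with a python exception
+// set for unsupported types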
+static int
+npy_to_fits_image_types(int npy_dtype, int *fits_img_type, int *fits_datatype) {
+
+ char mess[255];
+ switch (npy_dtype) {
+ case NPY_UINT8:
+ *fits_img_type = BYTE_IMG;
+ *fits_datatype = TBYTE;
+ break;
+ case NPY_INT8:
+ *fits_img_type = SBYTE_IMG;
+ *fits_datatype = TSBYTE;
+ break;
+ case NPY_UINT16:
+ *fits_img_type = USHORT_IMG;
+ *fits_datatype = TUSHORT;
+ break;
+ case NPY_INT16:
+ *fits_img_type = SHORT_IMG;
+ *fits_datatype = TSHORT;
+ break;
+
+ case NPY_UINT32:
+ //*fits_img_type = ULONG_IMG;
+ if (sizeof(unsigned short) == sizeof(npy_uint32)) {
+ *fits_img_type = USHORT_IMG;
+ *fits_datatype = TUSHORT;
+ } else if (sizeof(unsigned int) == sizeof(npy_uint32)) {
+ // there is no UINT_IMG, so use ULONG_IMG
+ *fits_img_type = ULONG_IMG;
+ *fits_datatype = TUINT;
+ } else if (sizeof(unsigned long) == sizeof(npy_uint32)) {
+ *fits_img_type = ULONG_IMG;
+ *fits_datatype = TULONG;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine 4 byte unsigned integer type");
+ *fits_datatype = -9999;
+ return 1;
+ }
+ break;
+
+ case NPY_INT32:
+ if (sizeof(short) == sizeof(npy_int32)) {
+ *fits_img_type = SHORT_IMG;
+ *fits_datatype = TSHORT;
+ } else if (sizeof(int) == sizeof(npy_int32)) {
+ *fits_img_type = LONG_IMG;
+ *fits_datatype = TINT;
+ } else if (sizeof(long) == sizeof(npy_int32)) {
+ *fits_img_type = LONG_IMG;
+ *fits_datatype = TLONG;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine 4 byte integer type");
+ *fits_datatype = -9999;
+ return 1;
+ }
+ break;
+
+ case NPY_INT64:
+ if (sizeof(LONGLONG) == sizeof(npy_int64)) {
+ *fits_img_type = LONGLONG_IMG;
+ *fits_datatype = TLONGLONG;
+ } else if (sizeof(long) == sizeof(npy_int64)) {
+ *fits_img_type = LONG_IMG;
+ *fits_datatype = TLONG;
+ } else if (sizeof(int) == sizeof(npy_int64)) {
+ // fall back to LONG_IMG if int happens to be 8 bytes
+ *fits_img_type = LONG_IMG;
+ *fits_datatype = TINT;
+ } else if (sizeof(long long) == sizeof(npy_int64)) {
+ // we don't expect to get here
+ *fits_img_type = LONGLONG_IMG;
+ *fits_datatype = TLONGLONG;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine 8 byte integer type");
+ *fits_datatype = -9999;
+ return 1;
+ }
+ break;
+
+
+ case NPY_FLOAT32:
+ *fits_img_type = FLOAT_IMG;
+ *fits_datatype = TFLOAT;
+ break;
+ case NPY_FLOAT64:
+ *fits_img_type = DOUBLE_IMG;
+ *fits_datatype = TDOUBLE;
+ break;
+
+ case NPY_UINT64:
+ PyErr_SetString(PyExc_TypeError, "Unsigned 8 byte integer images are not supported by the FITS standard");
+ *fits_datatype = -9999;
+ return 1;
+ break;
+
+ default:
+ sprintf(mess,"Unsupported numpy image datatype %d", npy_dtype);
+ PyErr_SetString(PyExc_TypeError, mess);
+ *fits_datatype = -9999;
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+
+/*
+ * this is really only for reading variable length columns since we should be
+ * able to just read the bytes for normal columns
+ */
+static int fits_to_npy_table_type(int fits_dtype, int* isvariable) {
+
+ if (fits_dtype < 0) {
+ *isvariable=1;
+ } else {
+ *isvariable=0;
+ }
+
+ switch (abs(fits_dtype)) {
+ case TBIT:
+ return NPY_INT8;
+ case TLOGICAL: // literal T or F stored as char
+ return NPY_INT8;
+ case TBYTE:
+ return NPY_UINT8;
+ case TSBYTE:
+ return NPY_INT8;
+
+ case TUSHORT:
+ if (sizeof(unsigned short) == sizeof(npy_uint16)) {
+ return NPY_UINT16;
+ } else if (sizeof(unsigned short) == sizeof(npy_uint8)) {
+ return NPY_UINT8;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TUSHORT");
+ return -9999;
+ }
+ case TSHORT:
+ if (sizeof(short) == sizeof(npy_int16)) {
+ return NPY_INT16;
+ } else if (sizeof(short) == sizeof(npy_int8)) {
+ return NPY_INT8;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TSHORT");
+ return -9999;
+ }
+
+ case TUINT:
+ if (sizeof(unsigned int) == sizeof(npy_uint32)) {
+ return NPY_UINT32;
+ } else if (sizeof(unsigned int) == sizeof(npy_uint64)) {
+ return NPY_UINT64;
+ } else if (sizeof(unsigned int) == sizeof(npy_uint16)) {
+ return NPY_UINT16;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TUINT");
+ return -9999;
+ }
+ case TINT:
+ if (sizeof(int) == sizeof(npy_int32)) {
+ return NPY_INT32;
+ } else if (sizeof(int) == sizeof(npy_int64)) {
+ return NPY_INT64;
+ } else if (sizeof(int) == sizeof(npy_int16)) {
+ return NPY_INT16;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TINT");
+ return -9999;
+ }
+
+ case TULONG:
+ if (sizeof(unsigned long) == sizeof(npy_uint32)) {
+ return NPY_UINT32;
+ } else if (sizeof(unsigned long) == sizeof(npy_uint64)) {
+ return NPY_UINT64;
+ } else if (sizeof(unsigned long) == sizeof(npy_uint16)) {
+ return NPY_UINT16;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TULONG");
+ return -9999;
+ }
+ case TLONG:
+ if (sizeof(long) == sizeof(npy_int32)) {
+ return NPY_INT32;
+ } else if (sizeof(long) == sizeof(npy_int64)) {
+ return NPY_INT64;
+ } else if (sizeof(long) == sizeof(npy_int16)) {
+ return NPY_INT16;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TLONG");
+ return -9999;
+ }
+
+
+ case TLONGLONG:
+ if (sizeof(LONGLONG) == sizeof(npy_int64)) {
+ return NPY_INT64;
+ } else if (sizeof(LONGLONG) == sizeof(npy_int32)) {
+ return NPY_INT32;
+ } else if (sizeof(LONGLONG) == sizeof(npy_int16)) {
+ return NPY_INT16;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TLONGLONG");
+ return -9999;
+ }
+
+
+
+ case TFLOAT:
+ return NPY_FLOAT32;
+ case TDOUBLE:
+ return NPY_FLOAT64;
+
+ case TCOMPLEX:
+ return NPY_COMPLEX64;
+ case TDBLCOMPLEX:
+ return NPY_COMPLEX128;
+
+
+ case TSTRING:
+ return NPY_STRING;
+
+ default:
+ PyErr_Format(PyExc_TypeError,"Unsupported FITS table datatype %d", fits_dtype);
+ return -9999;
+ }
+
+ return 0;
+}
+
+
+
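+// create an empty image HDU with no data, e.g. for an empty primary HDU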
+static int create_empty_hdu(struct PyFITSObject* self)
+{
+ int status=0;
+ int bitpix=SHORT_IMG;
+ int naxis=0;
+ long* naxes=NULL;
+ if (fits_create_img(self->fits, bitpix, naxis, naxes, &status)) {
+ set_ioerr_string_from_status(status);
+ return 1;
+ }
+
+ return 0;
+}
+
+
+// follows fits convention that return value is true
+// for failure
+//
+// exception strings are set internally
+//
+// length checking should happen in python
+//
+// note tile dims are written reverse order since
+// python orders C and fits orders Fortran
+static int set_compression(fitsfile *fits,
+ int comptype,
+ PyObject* tile_dims_obj,
+ int *status) {
+
+ npy_int64 *tile_dims_py=NULL;
+ long *tile_dims_fits=NULL;
+ npy_intp ndims=0, i=0;
+
+ // can be NOCOMPRESS (0)
+ if (fits_set_compression_type(fits, comptype, status)) {
+ set_ioerr_string_from_status(*status);
+ goto _set_compression_bail;
+ }
+
+ if (tile_dims_obj != Py_None) {
+
+ tile_dims_py=get_int64_from_array((PyArrayObject *) tile_dims_obj, &ndims);
+ if (tile_dims_py==NULL) {
+ *status=1;
+ } else {
+ tile_dims_fits = calloc(ndims,sizeof(long));
+ if (!tile_dims_fits) {
+ PyErr_Format(PyExc_MemoryError, "failed to allocate %ld longs",
+ ndims);
+ goto _set_compression_bail;
+ }
+
+ for (i=0; i<ndims; i++) {
+ tile_dims_fits[ndims-i-1] = tile_dims_py[i];
+ }
+
+ fits_set_tile_dim(fits, ndims, tile_dims_fits, status);
+
+ free(tile_dims_fits);tile_dims_fits=NULL;
+ }
+ }
+
+_set_compression_bail:
+ return *status;
+}
+
+static int pyarray_get_ndim(PyArrayObject* arr) {
+ // use the numpy API macro rather than direct struct access, which is
+ // deprecated in newer numpy versions
+ return PyArray_NDIM(arr);
+}
+
+/*
+ Create an image extension, possibly writing data as well.
+
+ We allow creating from dimensions rather than from the input image shape,
+ writing into the HDU later
+
+ It is useful to create the extension first so we can write keywords into the
+ header before adding data. This avoids moving the data if the header grows
+ too large.
+
+ However, on distributed file systems it can be more efficient to write
+ the data at this time due to slowness with updating the file in place.
+
+ */
+
+static PyObject *
+PyFITSObject_create_image_hdu(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+ int ndims=0;
+ long *dims=NULL;
+ int image_datatype=0; // fits type for image, AKA bitpix
+ int datatype=0; // type for the data we entered
+
+ int comptype=0; // same as NOCOMPRESS in newer cfitsio
+
+ PyObject *array_obj=NULL, *dims_obj=NULL, *tile_dims_obj=NULL;
+ PyArrayObject *array=NULL, *dims_array=NULL;
+
+ int npy_dtype=0, nkeys=0, write_data=0;
+ int i=0;
+ int status=0;
+
+ char* extname=NULL;
+ int extver=0;
+ float qlevel=0;
+ int qmethod=0;
+ int dither_seed=0;
+ float hcomp_scale=0;
+ int hcomp_smooth=0;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ static char *kwlist[] = {
+ "array","nkeys",
+ "dims",
+ "comptype",
+ "tile_dims",
+
+ "qlevel",
+ "qmethod",
+ "dither_seed",
+
+ "hcomp_scale",
+ "hcomp_smooth",
+
+ "extname",
+ "extver",
+ NULL,
+ };
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "Oi|OiOfiifisi", kwlist,
+ &array_obj, &nkeys,
+ &dims_obj,
+ &comptype,
+ &tile_dims_obj,
+
+ &qlevel,
+ &qmethod,
+ &dither_seed,
+
+ &hcomp_scale,
+ &hcomp_smooth,
+
+ &extname,
+ &extver)) {
+ goto create_image_hdu_cleanup;
+ }
+
+
+ if (array_obj == Py_None) {
+ if (create_empty_hdu(self)) {
+ return NULL;
+ }
+ } else {
+ array = (PyArrayObject *) array_obj;
+ if (!PyArray_Check(array)) {
+ PyErr_SetString(PyExc_TypeError, "input must be an array.");
+ goto create_image_hdu_cleanup;
+ }
+
+ npy_dtype = PyArray_TYPE(array);
+ if (npy_to_fits_image_types(npy_dtype, &image_datatype, &datatype)) {
+ goto create_image_hdu_cleanup;
+ }
+
+ if (PyArray_Check(dims_obj)) {
+ // get dims from input, which must be of type 'i8'
+ // this means we are not writing the array that was input,
+ // it is only used to determine the data type
+ dims_array = (PyArrayObject *) dims_obj;
+
+ npy_int64 *tptr=NULL, tmp=0;
+ ndims = PyArray_SIZE(dims_array);
+ dims = calloc(ndims,sizeof(long));
+ for (i=0; i<ndims; i++) {
+ tptr = (npy_int64 *) PyArray_GETPTR1(dims_array, i);
+ tmp = *tptr;
+ dims[ndims-i-1] = (long) tmp;
+ }
+ write_data=0;
+ } else {
+ // we get the dimensions from the array, which means we are going
+ // to write it as well
+ ndims = pyarray_get_ndim(array);
+ dims = calloc(ndims,sizeof(long));
+ for (i=0; i<ndims; i++) {
+ dims[ndims-i-1] = PyArray_DIM(array, i);
+ }
+ write_data=1;
+ }
+
+ // 0 means NOCOMPRESS but that wasn't defined in the bundled version of cfitsio
+ // if (comptype >= 0) {
+ if (comptype > 0) {
+ // exception strings are set internally
+ if (set_compression(self->fits, comptype, tile_dims_obj, &status)) {
+ goto create_image_hdu_cleanup;
+ }
+
+ if (fits_set_quantize_level(self->fits, qlevel, &status)) {
+ goto create_image_hdu_cleanup;
+ }
+
+ if (fits_set_quantize_method(self->fits, qmethod, &status)) {
+ goto create_image_hdu_cleanup;
+ }
+
+ // zero means to use the default (system clock).
+ if (dither_seed != 0) {
+ if (fits_set_dither_seed(self->fits, dither_seed, &status)) {
+ goto create_image_hdu_cleanup;
+ }
+ }
+
+ if (comptype == HCOMPRESS_1) {
+
+ if (fits_set_hcomp_scale(self->fits, hcomp_scale, &status)) {
+ goto create_image_hdu_cleanup;
+ }
+ if (fits_set_hcomp_smooth(self->fits, hcomp_smooth, &status)) {
+ goto create_image_hdu_cleanup;
+ }
+
+ }
+ }
+
+ if (fits_create_img(self->fits, image_datatype, ndims, dims, &status)) {
+ set_ioerr_string_from_status(status);
+ goto create_image_hdu_cleanup;
+ }
+
+
+ }
+ if (extname != NULL) {
+ if (strlen(extname) > 0) {
+
+ // comments are NULL
+ if (fits_update_key_str(self->fits, "EXTNAME", extname, NULL, &status)) {
+ set_ioerr_string_from_status(status);
+ goto create_image_hdu_cleanup;
+ }
+ if (extver > 0) {
+ if (fits_update_key_lng(self->fits, "EXTVER", (LONGLONG) extver, NULL, &status)) {
+ set_ioerr_string_from_status(status);
+ goto create_image_hdu_cleanup;
+ }
+ }
+ }
+ }
+
+ if (nkeys > 0) {
+ if (fits_set_hdrsize(self->fits, nkeys, &status) ) {
+ set_ioerr_string_from_status(status);
+ goto create_image_hdu_cleanup;
+ }
+ }
+
+ if (write_data) {
+ int firstpixel=1;
+ LONGLONG nelements = 0;
+ void* data=NULL;
+ nelements = PyArray_SIZE(array);
+ data = PyArray_DATA(array);
+ if (fits_write_img(self->fits, datatype, firstpixel, nelements, data, &status)) {
+ set_ioerr_string_from_status(status);
+ goto create_image_hdu_cleanup;
+ }
+ }
+
+ // this does a full close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ goto create_image_hdu_cleanup;
+ }
+
+
+create_image_hdu_cleanup:
+
+ // free dims on both the success and error paths to avoid a leak
+ free(dims); dims=NULL;
+
+ if (status != 0 || PyErr_Occurred()) {
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+
+// reshape the image to specified dims
+// the input array must be of type int64
+static PyObject *
+PyFITSObject_reshape_image(struct PyFITSObject* self, PyObject* args) {
+
+ int status=0;
+ int hdunum=0, hdutype=0;
+ PyObject *dims_obj=NULL;
+ PyArrayObject *dims_array=NULL;
+ LONGLONG dims[CFITSIO_MAX_ARRAY_DIMS]={0};
+ LONGLONG dims_orig[CFITSIO_MAX_ARRAY_DIMS]={0};
+ int ndims=0, ndims_orig=0;
+ npy_int64 dim=0;
+ npy_intp i=0;
+ int bitpix=0, maxdim=CFITSIO_MAX_ARRAY_DIMS;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, (char*)"iO", &hdunum, &dims_obj)) {
+ return NULL;
+ }
+ dims_array = (PyArrayObject *) dims_obj;
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // existing image params, just to get bitpix
+ if (fits_get_img_paramll(self->fits, maxdim, &bitpix, &ndims_orig, dims_orig, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ ndims = PyArray_SIZE(dims_array);
+ for (i=0; i<ndims; i++) {
+ dim= *(npy_int64 *) PyArray_GETPTR1(dims_array, i);
+ dims[i] = (LONGLONG) dim;
+ }
+
+ if (fits_resize_imgll(self->fits, bitpix, ndims, dims, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+// write the image to an existing HDU created using create_image_hdu
+// dims are not checked
+static PyObject *
+PyFITSObject_write_image(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0;
+ int hdutype=0;
+ LONGLONG nelements=1;
+ PY_LONG_LONG firstpixel_py=0;
+ LONGLONG firstpixel=0;
+ int image_datatype=0; // fits type for image, AKA bitpix
+ int datatype=0; // type for the data we entered
+
+ PyObject* array_obj=NULL;
+ PyArrayObject* array=NULL;
+ void* data=NULL;
+ int npy_dtype=0;
+ int status=0;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, (char*)"iOL", &hdunum, &array_obj, &firstpixel_py)) {
+ return NULL;
+ }
+ array = (PyArrayObject *) array_obj;
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (!PyArray_Check(array)) {
+ PyErr_SetString(PyExc_TypeError, "input must be an array.");
+ return NULL;
+ }
+
+ npy_dtype = PyArray_TYPE(array);
+ if (npy_to_fits_image_types(npy_dtype, &image_datatype, &datatype)) {
+ return NULL;
+ }
+
+
+ data = PyArray_DATA(array);
+ nelements = PyArray_SIZE(array);
+ firstpixel = (LONGLONG) firstpixel_py;
+ if (fits_write_img(self->fits, datatype, firstpixel, nelements, data, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this is a full file close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+
+/*
+ * Write tdims from the list. The list must be the expected length.
+ * Entries must be strings or None; if None the tdim is not written.
+ *
+ * The keys are written as TDIM{colnum}
+ */
+static int
+add_tdims_from_listobj(fitsfile* fits, PyObject* tdimObj, int ncols) {
+ int status=0, i=0;
+ size_t size=0;
+ char keyname[20];
+ int colnum=0;
+ PyObject* tmp=NULL;
+ char* tdim=NULL;
+
+ if (tdimObj == NULL || tdimObj == Py_None) {
+ // it is ok for it to be empty
+ return 0;
+ }
+
+ if (!PyList_Check(tdimObj)) {
+ PyErr_SetString(PyExc_ValueError, "Expected a list for tdims");
+ return 1;
+ }
+
+ size = PyList_Size(tdimObj);
+ if (size != (size_t)ncols) {
+ PyErr_Format(PyExc_ValueError, "Expected %d elements in tdims list, got %ld", ncols, size);
+ return 1;
+ }
+
+ for (i=0; i<ncols; i++) {
+ colnum=i+1;
+ tmp = PyList_GetItem(tdimObj, i);
+ if (tmp != Py_None) {
+ if (!is_python_string(tmp)) {
+ PyErr_SetString(PyExc_ValueError, "Expected only strings or None for tdim");
+ return 1;
+ }
+
+ sprintf(keyname, "TDIM%d", colnum);
+
+ tdim = get_object_as_string(tmp);
+ fits_write_key(fits, TSTRING, keyname, tdim, NULL, &status);
+ free(tdim);
+
+ if (status) {
+ set_ioerr_string_from_status(status);
+ return 1;
+ }
+ }
+ }
+
+
+ return 0;
+}
+
+
+// create a new table structure. No physical rows are added yet.
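+//
+// A hedged sketch of the expected python-side inputs (all values are
+// illustrative; the real wrapper layer may pass them differently):
+//
+//   create_table_hdu(table_type=BINARY_TBL, nkeys=10,
+//                    ttyp=['x', 'y'], tform=['1D', '3J'],
+//                    tunit=['deg', 'deg'], extname='CAT', extver=1)
+//
+// names and formats follow the FITS TTYPE/TFORM conventions.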
+static PyObject *
+PyFITSObject_create_table_hdu(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+ int status=0;
+ int table_type=0, nkeys=0;
+ int nfields=0;
+ LONGLONG nrows=0; // start empty
+
+ static char *kwlist[] = {
+ "table_type","nkeys", "ttyp","tform",
+ "tunit", "tdim", "extname", "extver", NULL};
+ // these are all strings
+ PyObject* ttypObj=NULL;
+ PyObject* tformObj=NULL;
+ PyObject* tunitObj=NULL; // optional
+ PyObject* tdimObj=NULL; // optional
+
+ // these must be freed
+ struct stringlist* ttyp=NULL;
+ struct stringlist* tform=NULL;
+ struct stringlist* tunit=NULL;
+ //struct stringlist* tdim=stringlist_new();
+ char* extname=NULL;
+ char* extname_use=NULL;
+ int extver=0;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiOO|OOsi", kwlist,
+ &table_type, &nkeys, &ttypObj, &tformObj, &tunitObj, &tdimObj, &extname, &extver)) {
+ return NULL;
+ }
+
+ ttyp=stringlist_new();
+ tform=stringlist_new();
+ tunit=stringlist_new();
+ if (stringlist_addfrom_listobj(ttyp, ttypObj, "names")) {
+ status=99;
+ goto create_table_cleanup;
+ }
+
+ if (stringlist_addfrom_listobj(tform, tformObj, "formats")) {
+ status=99;
+ goto create_table_cleanup;
+ }
+
+ if (tunitObj != NULL && tunitObj != Py_None) {
+ if (stringlist_addfrom_listobj(tunit, tunitObj,"units")) {
+ status=99;
+ goto create_table_cleanup;
+ }
+ }
+
+ if (extname != NULL) {
+ if (strlen(extname) > 0) {
+ extname_use = extname;
+ }
+ }
+ nfields = ttyp->size;
+ if ( fits_create_tbl(self->fits, table_type, nrows, nfields,
+ ttyp->data, tform->data, tunit->data, extname_use, &status) ) {
+ set_ioerr_string_from_status(status);
+ goto create_table_cleanup;
+ }
+
+ if (add_tdims_from_listobj(self->fits, tdimObj, nfields)) {
+ status=99;
+ goto create_table_cleanup;
+ }
+
+ if (extname_use != NULL) {
+ if (extver > 0) {
+
+ if (fits_update_key_lng(self->fits, "EXTVER", (LONGLONG) extver, NULL, &status)) {
+ set_ioerr_string_from_status(status);
+ goto create_table_cleanup;
+ }
+ }
+ }
+
+ if (nkeys > 0) {
+ if (fits_set_hdrsize(self->fits, nkeys, &status) ) {
+ set_ioerr_string_from_status(status);
+ goto create_table_cleanup;
+ }
+ }
+
+ // this does a full close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ goto create_table_cleanup;
+ }
+
+create_table_cleanup:
+ ttyp = stringlist_delete(ttyp);
+ tform = stringlist_delete(tform);
+ tunit = stringlist_delete(tunit);
+ //tdim = stringlist_delete(tdim);
+
+
+ if (status != 0) {
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+
+
+
+// insert a new column into an existing table
+static PyObject *
+PyFITSObject_insert_col(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+ int status=0;
+ int hdunum=0;
+ int colnum=0;
+
+ int hdutype=0;
+
+ static char *kwlist[] = {"hdunum","colnum","ttyp","tform","tdim", NULL};
+ // these are all strings
+ char* ttype=NULL; // field name
+ char* tform=NULL; // format
+ PyObject* tdimObj=NULL; // optional, a list of len 1
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiss|O", kwlist,
+ &hdunum, &colnum, &ttype, &tform, &tdimObj)) {
+ return NULL;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_insert_col(self->fits, colnum, ttype, tform, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // OK if dims are not sent
+ if (tdimObj != NULL && tdimObj != Py_None) {
+ PyObject* tmp=NULL;
+ char* tdim=NULL;
+ char keyname[20];
+
+ sprintf(keyname, "TDIM%d", colnum);
+ tmp = PyList_GetItem(tdimObj, 0);
+
+ tdim = get_object_as_string(tmp);
+ fits_write_key(self->fits, TSTRING, keyname, tdim, NULL, &status);
+ free(tdim);
+
+ if (status) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ }
+
+ // this does a full close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+
+
+
+// No error checking performed here
+static
+int write_string_column(
+ fitsfile *fits, /* I - FITS file pointer */
+ int colnum, /* I - number of column to write (1 = 1st col) */
+ LONGLONG firstrow, /* I - first row to write (1 = 1st row) */
+ LONGLONG firstelem, /* I - first vector element to write (1 = 1st) */
+ LONGLONG nelem, /* I - number of strings to write */
+ char *data,
+ int *status) { /* IO - error status */
+
+ LONGLONG i=0;
+ LONGLONG twidth=0;
+ // need to create a char** representation of the data, just point back
+ // into the data array at string width offsets. the fits_write_col_str
+ // takes care of skipping between fields.
+ char* cdata=NULL;
+ char** strdata=NULL;
+
+ // using struct def here, could cause problems
+ twidth = fits->Fptr->tableptr[colnum-1].twidth;
+
+ strdata = malloc(nelem*sizeof(char*));
+ if (strdata == NULL) {
+ PyErr_SetString(PyExc_MemoryError, "could not allocate temporary string pointers");
+ *status = 99;
+ return 1;
+ }
+ cdata = (char* ) data;
+ for (i=0; i<nelem; i++) {
+ strdata[i] = &cdata[twidth*i];
+ }
+
+ if( fits_write_col_str(fits, colnum, firstrow, firstelem, nelem, strdata, status)) {
+ set_ioerr_string_from_status(*status);
+ free(strdata);
+ return 1;
+ }
+
+
+ free(strdata);
+
+ return 0;
+}
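+/*
+ * A sketch of the char** view built above, assuming twidth=3 and
+ * nelem=2 with data = "ab\0cd\0":
+ *
+ *   strdata[0] -> &data[0]   ("ab")
+ *   strdata[1] -> &data[3]   ("cd")
+ *
+ * fits_write_col_str then writes each pointer as one fixed-width cell.
+ */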
+
+
+// write a column, starting at firstrow. On the python side, the firstrow kwd
+// should default to 1.
+// You can append rows using firstrow = nrows+1
+/*
+static PyObject *
+PyFITSObject_write_column(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+ int colnum=0;
+ int write_bitcols=0;
+ PyObject* array=NULL;
+
+ void* data=NULL;
+ PY_LONG_LONG firstrow_py=0;
+ LONGLONG firstrow=1;
+ LONGLONG firstelem=1;
+ LONGLONG nelem=0;
+ int npy_dtype=0;
+ int fits_dtype=0;
+
+ static char *kwlist[] = {"hdunum","colnum","array","firstrow","write_bitcols", NULL};
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiOLi",
+ kwlist, &hdunum, &colnum, &array, &firstrow_py, &write_bitcols)) {
+ return NULL;
+ }
+ firstrow = (LONGLONG) firstrow_py;
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+
+ if (!PyArray_Check(array)) {
+ PyErr_SetString(PyExc_ValueError,"only arrays can be written to columns");
+ return NULL;
+ }
+
+ npy_dtype = PyArray_TYPE(array);
+ fits_dtype = npy_to_fits_table_type(npy_dtype, write_bitcols);
+ if (fits_dtype == -9999) {
+ return NULL;
+ }
+ if (fits_dtype == TLOGICAL) {
+ int tstatus=0, ttype=0;
+ LONGLONG trepeat=0, twidth=0;
+ // if the column exists and is declared TBIT we will write
+ // that way instead
+ if (fits_get_coltypell(self->fits, colnum,
+ &ttype, &trepeat, &twidth, &tstatus)==0) {
+ // if we don't get here its because the column doesn't exist
+ // yet and that's ok
+ if (ttype==TBIT) {
+ fits_dtype=TBIT;
+ }
+ }
+ }
+
+
+
+ data = PyArray_DATA(array);
+ nelem = PyArray_SIZE(array);
+
+ if (fits_dtype == TSTRING) {
+
+ // this is my wrapper for strings
+ if (write_string_column(self->fits, colnum, firstrow, firstelem, nelem, data, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ } else if (fits_dtype == TBIT) {
+ if (fits_write_col_bit(self->fits, colnum, firstrow, firstelem, nelem, data, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ } else {
+ if( fits_write_col(self->fits, fits_dtype, colnum, firstrow, firstelem, nelem, data, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ }
+
+ // this is a full file close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+
+ Py_RETURN_NONE;
+}
+*/
+
+static PyObject *
+PyFITSObject_write_columns(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+ int write_bitcols=0;
+ //void **data_ptrs=NULL;
+ PyObject* colnum_list=NULL;
+ PyObject* array_list=NULL;
+ PyObject *tmp_obj=NULL;
+ PyArrayObject *tmp_array=NULL;
+
+ Py_ssize_t ncols=0;
+
+ void* data=NULL;
+ PY_LONG_LONG firstrow_py=0;
+ LONGLONG firstrow=1, thisrow=0;
+ LONGLONG firstelem=1;
+ LONGLONG nelem=0;
+ LONGLONG *nperrow=NULL;
+ int npy_dtype=0;
+ int *fits_dtypes=NULL;
+ int *is_string=NULL, *colnums=NULL;
+ void **array_ptrs=NULL;
+
+ npy_intp ndim=0, *dims=NULL;
+ Py_ssize_t irow=0, icol=0, j=0;
+
+ static char *kwlist[] = {"hdunum","colnums","arraylist","firstrow","write_bitcols", NULL};
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "iOOLi",
+ kwlist, &hdunum, &colnum_list, &array_list, &firstrow_py, &write_bitcols)) {
+ return NULL;
+ }
+ firstrow = (LONGLONG) firstrow_py;
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+
+ if (!PyList_Check(colnum_list)) {
+ PyErr_SetString(PyExc_ValueError,"colnums must be a list");
+ return NULL;
+ }
+ if (!PyList_Check(array_list)) {
+ PyErr_SetString(PyExc_ValueError,"arraylist must be a list");
+ return NULL;
+ }
+ ncols = PyList_Size(colnum_list);
+ if (ncols == 0) {
+ goto _fitsio_pywrap_write_columns_bail;
+ }
+ if (ncols != PyList_Size(array_list)) {
+ PyErr_Format(PyExc_ValueError,"colnum and array lists not same size: %ld/%ld",
+ ncols, PyList_Size(array_list));
+ return NULL;
+ }
+
+ // from here on we'll have some temporary arrays we have to free
+ is_string = calloc(ncols, sizeof(int));
+ colnums = calloc(ncols, sizeof(int));
+ array_ptrs = calloc(ncols, sizeof(void*));
+ nperrow = calloc(ncols, sizeof(LONGLONG));
+ fits_dtypes = calloc(ncols, sizeof(int));
+
+ for (icol=0; icol<ncols; icol++) {
+
+ tmp_obj = PyList_GetItem(colnum_list,icol);
+#if PY_MAJOR_VERSION >= 3
+ colnums[icol] = 1+(int) PyLong_AsLong(tmp_obj);
+#else
+ colnums[icol] = 1+(int) PyInt_AsLong(tmp_obj);
+#endif
+
+ tmp_array = (PyArrayObject *) PyList_GetItem(array_list, icol);
+ npy_dtype = PyArray_TYPE(tmp_array);
+
+ fits_dtypes[icol] = npy_to_fits_table_type(npy_dtype, write_bitcols);
+ if (fits_dtypes[icol] == -9999) {
+ status=1;
+ goto _fitsio_pywrap_write_columns_bail;
+ }
+ if (fits_dtypes[icol] == TLOGICAL) {
+ int tstatus=0, ttype=0;
+ LONGLONG trepeat=0, twidth=0;
+ // if the column exists and is declared TBIT we will write
+ // that way instead
+ if (fits_get_coltypell(self->fits, colnums[icol],
+ &ttype, &trepeat, &twidth, &tstatus)==0) {
+ // if we don't get here its because the column doesn't exist
+ // yet and that's ok
+ if (ttype==TBIT) {
+ fits_dtypes[icol]=TBIT;
+ }
+ }
+ }
+
+ if (fits_dtypes[icol]==TSTRING) {
+ is_string[icol] = 1;
+ }
+ ndim = PyArray_NDIM(tmp_array);
+ dims = PyArray_DIMS(tmp_array);
+ if (icol==0) {
+ nelem = dims[0];
+ } else {
+ if (dims[0] != nelem) {
+ PyErr_Format(PyExc_ValueError,
+ "not all entries have same row count, "
+ "%lld/%ld", nelem,dims[0]);
+ status=1;
+ goto _fitsio_pywrap_write_columns_bail;
+ }
+ }
+
+ array_ptrs[icol] = tmp_array;
+
+ nperrow[icol] = 1;
+ for (j=1; j<ndim; j++) {
+ nperrow[icol] *= dims[j];
+ }
+ }
+
+ for (irow=0; irow<nelem; irow++) {
+ thisrow = firstrow + irow;
+ for (icol=0; icol<ncols; icol++) {
+ data=PyArray_GETPTR1(array_ptrs[icol], irow);
+ if (is_string[icol]) {
+ if (write_string_column(self->fits,
+ colnums[icol],
+ thisrow,
+ firstelem,
+ nperrow[icol],
+ (char*)data,
+ &status)) {
+ set_ioerr_string_from_status(status);
+ goto _fitsio_pywrap_write_columns_bail;
+ }
+
+ } else if (fits_dtypes[icol] == TBIT) {
+ if (fits_write_col_bit(self->fits,
+ colnums[icol],
+ thisrow,
+ firstelem,
+ nperrow[icol],
+ data,
+ &status)) {
+ set_ioerr_string_from_status(status);
+ goto _fitsio_pywrap_write_columns_bail;
+ }
+ } else {
+ if( fits_write_col(self->fits,
+ fits_dtypes[icol],
+ colnums[icol],
+ thisrow,
+ firstelem,
+ nperrow[icol],
+ data,
+ &status)) {
+ set_ioerr_string_from_status(status);
+ goto _fitsio_pywrap_write_columns_bail;
+ }
+ }
+ }
+ }
+ /*
+ nelem = PyArray_SIZE(array);
+
+ if (fits_dtype == TSTRING) {
+
+ // this is my wrapper for strings
+ if (write_string_column(self->fits, colnum, firstrow, firstelem, nelem, data, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ } else {
+ if( fits_write_col(self->fits, fits_dtype, colnum, firstrow, firstelem, nelem, data, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ }
+
+ // this is a full file close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ */
+
+_fitsio_pywrap_write_columns_bail:
+ free(is_string); is_string=NULL;
+ free(colnums); colnums=NULL;
+ free(array_ptrs); array_ptrs=NULL;
+ free(nperrow); nperrow=NULL;
+ free(fits_dtypes); fits_dtypes=NULL;
+ if (status != 0) {
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+
+
+
+
+
+
+
+// No error checking performed here
+static
+int write_var_string_column(
+ fitsfile *fits, /* I - FITS file pointer */
+ int colnum, /* I - number of column to write (1 = 1st col) */
+ LONGLONG firstrow, /* I - first row to write (1 = 1st row) */
+ PyArrayObject* array,
+ int *status) { /* IO - error status */
+
+ LONGLONG firstelem=1; // ignored
+ LONGLONG nelem=1; // ignored
+ npy_intp nrows=0;
+ npy_intp i=0;
+ char* ptr=NULL;
+ int res=0;
+
+ PyObject* el=NULL;
+ char* strdata=NULL;
+ char* strarr[1];
+
+
+ nrows = PyArray_SIZE(array);
+ for (i=0; i<nrows; i++) {
+ ptr = PyArray_GetPtr(array, &i);
+ el = PyArray_GETITEM(array, ptr);
+
+ strdata=get_object_as_string(el);
+ // PyArray_GETITEM returned a new reference we no longer need
+ Py_XDECREF(el);
+ el=NULL;
+
+ // just a container
+ strarr[0] = strdata;
+ res=fits_write_col_str(fits, colnum,
+ firstrow+i, firstelem, nelem,
+ strarr, status);
+
+ free(strdata);
+ if(res > 0) {
+ goto write_var_string_column_cleanup;
+ }
+ }
+
+write_var_string_column_cleanup:
+
+ if (*status > 0) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * No error checking performed here
+ */
+static
+int write_var_num_column(
+ fitsfile *fits, /* I - FITS file pointer */
+ int colnum, /* I - number of column to write (1 = 1st col) */
+ LONGLONG firstrow, /* I - first row to write (1 = 1st row) */
+ int fits_dtype,
+ PyArrayObject* array,
+ int *status) { /* IO - error status */
+
+ LONGLONG firstelem=1;
+ npy_intp nelem=0;
+ npy_intp nrows=0;
+ npy_intp i=0;
+ PyObject* el=NULL;
+ PyArrayObject* el_array=NULL;
+ void* data=NULL;
+ void* ptr=NULL;
+
+ int npy_dtype=0, isvariable=0;
+
+ int mindepth=1, maxdepth=0;
+ PyObject* context=NULL;
+ int requirements =
+ NPY_C_CONTIGUOUS
+ | NPY_ALIGNED
+ | NPY_NOTSWAPPED
+ | NPY_ELEMENTSTRIDES;
+
+ int res=0;
+
+ npy_dtype = fits_to_npy_table_type(fits_dtype, &isvariable);
+
+ nrows = PyArray_SIZE(array);
+ for (i=0; i<nrows; i++) {
+ ptr = PyArray_GetPtr((PyArrayObject*) array, &i);
+ el = PyArray_GETITEM(array, ptr);
+
+ // a copy is only made if needed
+ el_array = (PyArrayObject *) PyArray_CheckFromAny(
+ el, PyArray_DescrFromType(npy_dtype),
+ mindepth, maxdepth,
+ requirements, context
+ );
+ // PyArray_GETITEM returned a new reference; el_array now holds
+ // its own reference (or is NULL on error)
+ Py_XDECREF(el);
+ el=NULL;
+ if (el_array == NULL) {
+ // error message will already be set
+ return 1;
+ }
+
+ nelem = PyArray_SIZE(el_array);
+ data = PyArray_DATA(el_array);
+ res = fits_write_col(fits, abs(fits_dtype), colnum,
+ firstrow+i, firstelem, (LONGLONG) nelem, data, status);
+ Py_XDECREF(el_array);
+
+ if(res > 0) {
+ set_ioerr_string_from_status(*status);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+
+
+
+/*
+ * write a variable length column, starting at firstrow. On the python side,
+ * the firstrow kwd should default to 1. You can append rows using firstrow =
+ * nrows+1
+ *
+ * The input array should be of type NPY_OBJECT, and the elements
+ * should be either all strings or numpy arrays of the same type
+ */
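+/*
+ * A hedged sketch of a valid input (python side): an object array whose
+ * elements are arrays of a common type,
+ *
+ *     arr = numpy.empty(3, dtype=object)
+ *     arr[0] = numpy.arange(2, dtype='i4')
+ *     arr[1] = numpy.arange(5, dtype='i4')
+ *     arr[2] = numpy.arange(1, dtype='i4')
+ *
+ * each element becomes one row of the variable length column; for
+ * string columns the elements would instead all be strings.
+ */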
+
+static PyObject *
+PyFITSObject_write_var_column(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+ int colnum=0;
+ PyObject* array_obj=NULL;
+ PyArrayObject* array=NULL;
+
+ PY_LONG_LONG firstrow_py=0;
+ LONGLONG firstrow=1;
+ int npy_dtype=0;
+ int fits_dtype=0;
+
+ static char *kwlist[] = {"hdunum","colnum","array","firstrow", NULL};
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiOL",
+ kwlist, &hdunum, &colnum, &array_obj, &firstrow_py)) {
+ return NULL;
+ }
+ firstrow = (LONGLONG) firstrow_py;
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+
+ if (!PyArray_Check(array_obj)) {
+ PyErr_SetString(PyExc_ValueError,"only arrays can be written to columns");
+ return NULL;
+ }
+ array = (PyArrayObject *) array_obj;
+
+ npy_dtype = PyArray_TYPE(array);
+ if (npy_dtype != NPY_OBJECT) {
+ PyErr_SetString(PyExc_TypeError,"only object arrays can be written to variable length columns");
+ return NULL;
+ }
+
+ // determine the fits dtype for this column. We will use this to get data
+ // from the array for writing
+ if (fits_get_eqcoltypell(self->fits, colnum, &fits_dtype, NULL, NULL, &status) > 0) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_dtype == -TSTRING) {
+ if (write_var_string_column(self->fits, colnum, firstrow, array, &status)) {
+ if (status != 0) {
+ set_ioerr_string_from_status(status);
+ }
+ return NULL;
+ }
+ } else {
+ if (write_var_num_column(self->fits, colnum, firstrow, fits_dtype, array, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ }
+
+ // this is a full file close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+
+ Py_RETURN_NONE;
+}
+
+
+/*
+ case for writing an entire record
+*/
+static PyObject *
+PyFITSObject_write_record(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ char* cardin=NULL;
+ char card[FLEN_CARD];
+
+ if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &cardin)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ // cardin may be longer than a card; copy at most FLEN_CARD-1
+ // characters and guarantee null termination
+ strncpy(card, cardin, FLEN_CARD-1);
+ card[FLEN_CARD-1] = '\0';
+
+ if (fits_write_record(self->fits, card, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does not close and reopen
+ if (fits_flush_buffer(self->fits, 0, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+
+// let python do the conversions
+static PyObject *
+PyFITSObject_write_string_key(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ char* keyname=NULL;
+ char* value=NULL;
+ char* comment=NULL;
+ char* comment_in=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"isss", &hdunum, &keyname, &value, &comment_in)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (strlen(comment_in) > 0) {
+ comment=comment_in;
+ }
+
+ if (fits_update_key_longstr(self->fits, keyname, value, comment, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does not close and reopen
+ if (fits_flush_buffer(self->fits, 0, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+PyFITSObject_write_double_key(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ int decimals=-15;
+
+ char* keyname=NULL;
+ double value=0;
+ char* comment=NULL;
+ char* comment_in=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"isds", &hdunum, &keyname, &value, &comment_in)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (strlen(comment_in) > 0) {
+ comment=comment_in;
+ }
+
+ if (fits_update_key_dbl(self->fits, keyname, value, decimals, comment, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does not close and reopen
+ if (fits_flush_buffer(self->fits, 0, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+PyFITSObject_write_long_long_key(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ char* keyname=NULL;
+ long long value=0;
+ char* comment=NULL;
+ char* comment_in=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"isLs", &hdunum, &keyname, &value, &comment_in)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (strlen(comment_in) > 0) {
+ comment=comment_in;
+ }
+
+ if (fits_update_key_lng(self->fits, keyname, (LONGLONG) value, comment, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does not close and reopen
+ if (fits_flush_buffer(self->fits, 0, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+PyFITSObject_write_logical_key(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ char* keyname=NULL;
+ int value=0;
+ char* comment=NULL;
+ char* comment_in=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"isis", &hdunum, &keyname, &value, &comment_in)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (strlen(comment_in) > 0) {
+ comment=comment_in;
+ }
+
+ if (fits_update_key_log(self->fits, keyname, value, comment, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does not close and reopen
+ if (fits_flush_buffer(self->fits, 0, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+// let python do the conversions
+static PyObject *
+PyFITSObject_write_comment(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ char* comment=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &comment)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_write_comment(self->fits, comment, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does not close and reopen
+ if (fits_flush_buffer(self->fits, 0, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+// let python do the conversions
+static PyObject *
+PyFITSObject_write_history(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ char* history=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &history)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_write_history(self->fits, history, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does not close and reopen
+ if (fits_flush_buffer(self->fits, 0, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+// ADW: Adapted from ffpcom and ffphis in putkey.c
+static
+int fits_write_continue( fitsfile *fptr, /* I - FITS file pointer */
+ const char *cont, /* I - continue string */
+ int *status) /* IO - error status */
+/*
+ Write 1 or more CONTINUE keywords. If the continue string is too
+ long to fit on a single keyword (72 chars) then it will automatically
+ be continued on multiple CONTINUE keywords.
+*/
+{
+ char card[FLEN_CARD];
+ int len, ii;
+
+ if (*status > 0) /* inherit input status value if > 0 */
+ return(*status);
+
+ len = strlen(cont);
+ ii = 0;
+
+ for (; len > 0; len -= 72)
+ {
+ strcpy(card, "CONTINUE");
+ strncat(card, &cont[ii], 72);
+ ffprec(fptr, card, status);
+ ii += 72;
+ }
+
+ return(*status);
+}
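+/*
+ * A worked example of the splitting above: a 150 character value
+ * produces three cards,
+ *
+ *   CONTINUE + chars   0-71
+ *   CONTINUE + chars  72-143
+ *   CONTINUE + chars 144-149
+ */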
+
+// let python do the conversions
+static PyObject *
+PyFITSObject_write_continue(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ char* value=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &value)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_write_continue(self->fits, value, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does not close and reopen
+ if (fits_flush_buffer(self->fits, 0, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+
+static PyObject *
+PyFITSObject_write_undefined_key(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ char* keyname=NULL;
+ char* comment=NULL;
+ char* comment_in=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"iss", &hdunum, &keyname, &comment_in)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (strlen(comment_in) > 0) {
+ comment=comment_in;
+ }
+
+ if (fits_update_key_null(self->fits, keyname, comment, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does not close and reopen
+ if (fits_flush_buffer(self->fits, 0, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+
+
+/*
+ insert a set of rows
+*/
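+/*
+ * A hedged sketch: insert_rows(hdunum, 5, 3) opens a gap of three blank
+ * rows after row 5 (1-offset), and firstrow=0 inserts at the start of
+ * the table; CFITSIO initializes the inserted rows.
+ */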
+
+static PyObject *
+PyFITSObject_insert_rows(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+ int status=0;
+ int hdunum=0;
+
+ int hdutype=0;
+ PY_LONG_LONG firstrow_py=0, nrows_py=0;
+ LONGLONG firstrow=0, nrows=0;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, (char*)"iLL",
+ &hdunum, &firstrow_py, &nrows_py)) {
+ return NULL;
+ }
+
+ firstrow = (LONGLONG) firstrow_py;
+ nrows = (LONGLONG) nrows_py;
+
+ if (nrows <= 0) {
+ // nothing to do, just return
+ Py_RETURN_NONE;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_insert_rows(self->fits, firstrow, nrows, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does a full close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+
+/*
+
+ delete a range of rows
+
+ input stop is like a python slice, so exclusive, but 1-offset
+ rather than 0-offset
+*/
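+/*
+ * A hedged example: delete_row_range(hdunum, 1, 11) removes the first
+ * ten rows, mirroring the python slice [0:10].
+ */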
+
+static PyObject *
+PyFITSObject_delete_row_range(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+ int status=0;
+ int hdunum=0;
+
+ int hdutype=0;
+ PY_LONG_LONG slice_start_py=0, slice_stop_py=0;
+ LONGLONG slice_start=0, slice_stop=0, nrows=0;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, (char*)"iLL",
+ &hdunum, &slice_start_py, &slice_stop_py)) {
+ return NULL;
+ }
+
+ slice_start = (LONGLONG) slice_start_py;
+ slice_stop = (LONGLONG) slice_stop_py;
+ nrows = slice_stop - slice_start;
+
+ if (nrows <= 0) {
+ // nothing to do, just return
+ Py_RETURN_NONE;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_delete_rows(self->fits, slice_start, nrows, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does a full close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+/*
+
+ delete a specific set of rows, 1-offset
+
+ no type checking is applied to the rows
+*/
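+/*
+ * A hedged sketch: the python layer is expected to pass an int64 array
+ * of 1-offset row numbers, e.g.
+ *
+ *     rows = numpy.array([3, 7], dtype='i8')
+ *
+ * to delete the third and seventh rows.
+ */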
+
+static PyObject *
+PyFITSObject_delete_rows(struct PyFITSObject* self, PyObject* args, PyObject* kwds) {
+ int status=0;
+ int hdunum=0;
+
+ int hdutype=0;
+ PyObject *rows_obj=NULL;
+ PyArrayObject *rows_array=NULL;
+ LONGLONG *rows=NULL, nrows=0;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_ValueError, "fits file is NULL");
+ return NULL;
+ }
+
+ if (!PyArg_ParseTuple(args, (char*)"iO",
+ &hdunum, &rows_obj)) {
+ return NULL;
+ }
+ rows_array = (PyArrayObject *) rows_obj;
+
+ rows = (LONGLONG *) PyArray_DATA(rows_array);
+ nrows = PyArray_SIZE(rows_array);
+ if (nrows <= 0) {
+ Py_RETURN_NONE;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_delete_rowlistll(self->fits, rows, nrows, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this does a full close and reopen
+ if (fits_flush_file(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+
+
+
+/*
+ * read a single, entire column from an ascii table into the input array. This
+ * version uses the standard read column instead of our by-bytes version.
+ *
+ * A number of assumptions are made, such as that columns are scalar, which
+ * is true for ascii.
+ */
+
+static int read_ascii_column_all(fitsfile* fits, int colnum, PyArrayObject* array, int* status) {
+
+ int npy_dtype=0;
+ int fits_dtype=0;
+
+ npy_intp nelem=0;
+ LONGLONG firstelem=1;
+ LONGLONG firstrow=1;
+ int* anynul=NULL;
+ void* nulval=0;
+ char* nulstr=" ";
+ void* data=NULL;
+ char* cdata=NULL;
+
+ npy_dtype = PyArray_TYPE(array);
+ fits_dtype = npy_to_fits_table_type(npy_dtype,0);
+
+ nelem = PyArray_SIZE(array);
+
+ if (fits_dtype == TSTRING) {
+ npy_intp i=0;
+ LONGLONG rownum=0;
+
+ for (i=0; i<nelem; i++) {
+ cdata = PyArray_GETPTR1(array, i);
+ rownum = (LONGLONG) (1+i);
+ if (fits_read_col_str(fits,colnum,rownum,firstelem,1,nulstr,&cdata,anynul,status) > 0) {
+ return 1;
+ }
+ }
+
+ /*
+
+ LONGLONG twidth=0;
+ char** strdata=NULL;
+
+ cdata = (char*) PyArray_DATA(array);
+
+ strdata=malloc(nelem*sizeof(char*));
+ if (NULL==strdata) {
+ PyErr_SetString(PyExc_MemoryError, "could not allocate temporary string pointers");
+ *status = 99;
+ return 1;
+
+ }
+
+
+ twidth=fits->Fptr->tableptr[colnum-1].twidth;
+ for (i=0; i<nelem; i++) {
+ //strdata[i] = &cdata[twidth*i];
+ // this 1-d assumption works because array fields are not allowedin ascii
+ strdata[i] = (char*) PyArray_GETPTR1(array, i);
+ }
+
+ if (fits_read_col_str(fits,colnum,firstrow,firstelem,nelem,nulstr,strdata,anynul,status) > 0) {
+ free(strdata);
+ return 1;
+ }
+
+ free(strdata);
+ */
+
+ } else {
+ data=PyArray_DATA(array);
+ if (fits_read_col(fits,fits_dtype,colnum,firstrow,firstelem,nelem,nulval,data,anynul,status) > 0) {
+ return 1;
+ }
+ }
+
+ return 0;
+
+}
+static int read_ascii_column_byrow(
+ fitsfile* fits,
+ int colnum,
+ PyArrayObject* array,
+ PyArrayObject* rows,
+ PyArrayObject* sortind,
+ int* status
+)
+{
+
+ int npy_dtype=0;
+ int fits_dtype=0;
+
+ npy_intp nelem=0;
+ LONGLONG firstelem=1;
+ LONGLONG rownum=0;
+ npy_int64 si=0;
+ npy_intp nrows=-1;
+
+ int* anynul=NULL;
+ void* nulval=0;
+ char* nulstr=" ";
+ void* data=NULL;
+ char* cdata=NULL;
+
+ int dorows=0;
+
+ npy_intp i=0;
+
+ npy_dtype = PyArray_TYPE(array);
+ fits_dtype = npy_to_fits_table_type(npy_dtype,0);
+
+ nelem = PyArray_SIZE(array);
+
+
+ if ((PyObject *) rows != Py_None) {
+ dorows=1;
+ nrows = PyArray_SIZE(rows);
+ if (nrows != nelem) {
+ PyErr_Format(PyExc_ValueError,
+ "input array[%ld] and rows[%ld] have different size", nelem,nrows);
+ return 1;
+ }
+ }
+
+ for (i=0; i<nelem; i++) {
+ if (dorows) {
+ si = *(npy_int64*) PyArray_GETPTR1(sortind, i);
+ rownum = (LONGLONG)( *(npy_int64*) PyArray_GETPTR1(rows, si) );
+ rownum += 1;
+ } else {
+ si = i;
+ rownum = (LONGLONG) (1+i);
+ }
+ // assuming 1-D; each value lands at the slot matching its
+ // position in the original rows request
+ data = PyArray_GETPTR1(array, si);
+ if (fits_dtype == TSTRING) {
+ cdata = (char* ) data;
+ if (fits_read_col_str(fits,colnum,rownum,firstelem,1,nulstr,&cdata,anynul,status) > 0) {
+ return 1;
+ }
+ } else {
+ if (fits_read_col(fits,fits_dtype,colnum,rownum,firstelem,1,nulval,data,anynul,status) > 0) {
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+static int read_ascii_column(
+ fitsfile* fits,
+ int colnum,
+ PyArrayObject* array,
+ PyArrayObject* rows,
+ PyArrayObject* sortind,
+ int* status
+)
+{
+
+ int ret=0;
+ if ((PyObject *) rows != Py_None || !PyArray_ISCONTIGUOUS(array)) {
+ ret = read_ascii_column_byrow(
+ fits, colnum, array, rows, sortind, status
+ );
+ } else {
+ ret = read_ascii_column_all(fits, colnum, array, status);
+ }
+
+ return ret;
+}
+
+
+
+
+
+// read a subset of rows for the input column
+// the row array is assumed to be unique and sorted.
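+//
+// A sketch of the rows/sortind contract assumed here: rows holds the
+// requested (possibly unordered) 0-offset rows and sortind the indices
+// that sort them. E.g. rows=[7,2,5] with sortind=[1,2,0] traverses the
+// file in on-disk order (2,5,7) while each value still lands at the
+// output slot matching its position in rows.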
+static int read_binary_column(
+ fitsfile* fits,
+ int colnum,
+ npy_intp nrows,
+ npy_int64* rows,
+ npy_int64* sortind,
+ void* vdata,
+ npy_intp stride,
+ int* status) {
+
+ FITSfile* hdu=NULL;
+ tcolumn* colptr=NULL;
+ LONGLONG file_pos=0, irow=0;
+ npy_int64 row=0, si=0;
+
+ LONGLONG repeat=0;
+ LONGLONG width=0;
+
+ // use char for pointer arith. It's actually ok to use void as char but
+ // this is just in case.
+ char *data=NULL, *ptr=NULL;
+
+ data = (char *) vdata;
+
+ // using struct defs here, could cause problems
+ hdu = fits->Fptr;
+ colptr = hdu->tableptr + (colnum-1);
+
+ repeat = colptr->trepeat;
+ width = colptr->tdatatype == TSTRING ? 1 : colptr->twidth;
+
+ for (irow=0; irow<nrows; irow++) {
+ if (rows != NULL) {
+ si = sortind[irow];
+ row = rows[si];
+ } else {
+ si = irow;
+ row = irow;
+ }
+
+ ptr = data + si * stride;
+
+ file_pos = hdu->datastart + row*hdu->rowlength + colptr->tbcol;
+ ffmbyt(fits, file_pos, REPORT_EOF, status);
+ if (ffgbytoff(fits, width, repeat, 0, (void*)ptr, status)) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+
+
+
+/*
+ * read from a column into an input array
+ */
+static PyObject *
+PyFITSObject_read_column(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0;
+ int hdutype=0;
+ int colnum=0;
+
+ FITSfile* hdu=NULL;
+ int status=0;
+
+ PyObject* array_obj=NULL, *rows_obj=NULL, *sortind_obj=NULL;
+ PyArrayObject *array=NULL, *rows_array=NULL, *sortind_array=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"iiOOO",
+ &hdunum, &colnum, &array_obj, &rows_obj, &sortind_obj)) {
+ return NULL;
+ }
+
+ array = (PyArrayObject *) array_obj;
+ rows_array = (PyArrayObject *) rows_obj;
+ sortind_array = (PyArrayObject *) sortind_obj;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // using struct defs here, could cause problems
+ hdu = self->fits->Fptr;
+ if (hdutype == IMAGE_HDU) {
+ PyErr_SetString(PyExc_RuntimeError, "Cannot yet read columns from an IMAGE_HDU");
+ return NULL;
+ }
+ if (colnum < 1 || colnum > hdu->tfield) {
+ PyErr_SetString(PyExc_RuntimeError, "requested column is out of bounds");
+ return NULL;
+ }
+
+
+ if (hdutype == ASCII_TBL) {
+ if (read_ascii_column(self->fits, colnum, array, rows_array, sortind_array, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ } else {
+ void* data=PyArray_DATA(array);
+ npy_intp nrows=0, nsortind=0;
+ npy_int64* rows=NULL, *sortind=NULL;
+ npy_intp stride=PyArray_STRIDE(array,0);
+ if (rows_obj == Py_None) {
+ nrows = hdu->numrows;
+ } else {
+ rows = get_int64_from_array(rows_array, &nrows);
+ sortind = get_int64_from_array(sortind_array, &nsortind);
+ }
+
+ if (read_binary_column(self->fits, colnum, nrows, rows, sortind, data, stride, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ }
+ Py_RETURN_NONE;
+}
+
+
+
+
+/*
+ * Release the python list along with its contents. PyList_GetItem
+ * returns borrowed references and the list owns one reference per
+ * item, so dropping the list reference is sufficient; decrefing the
+ * items individually as well would over-release them.
+ */
+static void free_all_python_list(PyObject* list) {
+ Py_XDECREF(list);
+}
+
+static PyObject*
+read_var_string(fitsfile* fits, int colnum, LONGLONG row, LONGLONG nchar, int* status) {
+ LONGLONG firstelem=1;
+ char* str=NULL;
+ char* strarr[1];
+ PyObject* stringObj=NULL;
+ void* nulval=0;
+ int* anynul=NULL;
+
+ str=calloc(nchar+1,sizeof(char));
+ if (str == NULL) {
+ PyErr_Format(PyExc_MemoryError,
+ "Could not allocate string of size %lld", nchar);
+ return NULL;
+ }
+
+ strarr[0] = str;
+ if (fits_read_col(fits,TSTRING,colnum,row,firstelem,nchar,nulval,strarr,anynul,status) > 0) {
+ goto read_var_string_cleanup;
+ }
+#if PY_MAJOR_VERSION >= 3
+ // bytes
+ stringObj = Py_BuildValue("y",str);
+#else
+ stringObj = Py_BuildValue("s",str);
+#endif
+ if (NULL == stringObj) {
+ PyErr_Format(PyExc_MemoryError,
+ "Could not allocate py string of size %lld", nchar);
+ goto read_var_string_cleanup;
+ }
+
+read_var_string_cleanup:
+ free(str);
+
+ return stringObj;
+}
+static PyObject*
+read_var_nums(fitsfile* fits, int colnum, LONGLONG row, LONGLONG nelem,
+ int fits_dtype, int npy_dtype, int* status) {
+ LONGLONG firstelem=1;
+ PyArrayObject* array=NULL;
+ void* nulval=0;
+ int* anynul=NULL;
+ npy_intp dims[1];
+ int fortran=0;
+ void* data=NULL;
+
+
+ dims[0] = nelem;
+ array = (PyArrayObject *) PyArray_ZEROS(1, dims, npy_dtype, fortran);
+ if (array == NULL) {
+ PyErr_Format(PyExc_MemoryError,
+ "Could not allocate array type %d size %lld",npy_dtype,nelem);
+ return NULL;
+ }
+ data = PyArray_DATA(array);
+ if (fits_read_col(fits,abs(fits_dtype),colnum,row,firstelem,nelem,nulval,data,anynul,status) > 0) {
+ Py_XDECREF(array);
+ return NULL;
+ }
+
+ return (PyObject *) array;
+}
+/*
+ * read a variable length column as a list of arrays
+ * what about strings?
+ */
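+/*
+ * A hedged sketch of the return value: for a variable length 'i4'
+ * column whose first two rows hold 2 and 3 elements, the method
+ * returns a list like
+ *
+ *     [array([.., ..], dtype=int32), array([.., .., ..], dtype=int32)]
+ *
+ * for -TSTRING columns the list elements are byte strings instead.
+ */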
+static PyObject *
+PyFITSObject_read_var_column_as_list(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0;
+ int colnum=0;
+ PyObject* rows_obj=NULL, *sortind_obj=NULL;
+ PyArrayObject* rows_array=NULL, *sortind_array=NULL;
+
+ int hdutype=0;
+ int ncols=0;
+ const npy_int64* rows=NULL, *sortind=NULL;
+ LONGLONG nrows=0;
+ int get_all_rows=0;
+
+ int status=0, tstatus=0;
+
+ int fits_dtype=0;
+ int npy_dtype=0;
+ int isvariable=0;
+ LONGLONG repeat=0;
+ LONGLONG width=0;
+ LONGLONG offset=0;
+ LONGLONG i=0;
+ LONGLONG row=0;
+ npy_int64 si = 0;
+
+ PyObject* listObj=NULL;
+ PyObject* tempObj=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"iiOO", &hdunum, &colnum, &rows_obj, &sortind_obj)) {
+ return NULL;
+ }
+ rows_array = (PyArrayObject *) rows_obj;
+ sortind_array = (PyArrayObject *) sortind_obj;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (hdutype == IMAGE_HDU) {
+ PyErr_SetString(PyExc_RuntimeError, "Cannot yet read columns from an IMAGE_HDU");
+ return NULL;
+ }
+ // using struct defs here, could cause problems
+ fits_get_num_cols(self->fits, &ncols, &status);
+ if (colnum < 1 || colnum > ncols) {
+ PyErr_SetString(PyExc_RuntimeError, "requested column is out of bounds");
+ return NULL;
+ }
+
+ if (fits_get_coltypell(self->fits, colnum, &fits_dtype, &repeat, &width, &status) > 0) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ npy_dtype = fits_to_npy_table_type(fits_dtype, &isvariable);
+ if (npy_dtype < 0) {
+ return NULL;
+ }
+ if (!isvariable) {
+ PyErr_Format(PyExc_TypeError,"Column %d not a variable length %d", colnum, fits_dtype);
+ return NULL;
+ }
+
+ if (rows_obj == Py_None) {
+ fits_get_num_rowsll(self->fits, &nrows, &tstatus);
+ get_all_rows=1;
+ } else {
+ npy_intp tnrows=0, nsortind=0;
+ rows = (const npy_int64*) get_int64_from_array(rows_array, &tnrows);
+ sortind = (const npy_int64*) get_int64_from_array(sortind_array, &nsortind);
+ nrows = (LONGLONG) tnrows;
+ get_all_rows = 0;
+ }
+
+ listObj = PyList_New(0);
+
+ for (i=0; i<nrows; i++) {
+ tempObj=NULL;
+
+ if (get_all_rows) {
+ row = i+1;
+ } else {
+ si = sortind[i];
+ row = (LONGLONG) (rows[si]+1);
+ }
+
+ // repeat holds how many elements are in this row
+ if (fits_read_descriptll(self->fits, colnum, row, &repeat, &offset, &status) > 0) {
+ goto read_var_column_cleanup;
+ }
+
+ if (fits_dtype == -TSTRING) {
+ tempObj = read_var_string(self->fits,colnum,row,repeat,&status);
+ } else {
+ tempObj = read_var_nums(self->fits,colnum,row,repeat,
+ fits_dtype,npy_dtype,&status);
+ }
+ if (tempObj == NULL) {
+ tstatus=1;
+ goto read_var_column_cleanup;
+ }
+ PyList_Append(listObj, tempObj);
+ Py_XDECREF(tempObj);
+ }
+
+
+read_var_column_cleanup:
+
+ if (status != 0 || tstatus != 0) {
+ Py_XDECREF(tempObj);
+ free_all_python_list(listObj);
+ if (status != 0) {
+ set_ioerr_string_from_status(status);
+ }
+ return NULL;
+ }
+
+ return listObj;
+}
+
+
+// read specified columns and rows
+static int read_binary_rec_columns(
+ fitsfile* fits,
+ npy_intp ncols,
+ npy_int64* colnums,
+ npy_intp nrows,
+ npy_int64* rows,
+ npy_int64* sortind,
+ PyArrayObject *array,
+ int* status
+) {
+ FITSfile* hdu=NULL;
+ tcolumn* colptr=NULL;
+ LONGLONG file_pos=0;
+ npy_intp col=0;
+ npy_int64 colnum=0;
+ char* ptr=NULL;
+
+ npy_intp irow=0;
+ npy_int64 row=0, si=0;
+
+ LONGLONG gsize=0; // number of bytes in column
+ LONGLONG repeat=0;
+ LONGLONG width=0;
+
+ // using struct defs here, could cause problems
+ hdu = fits->Fptr;
+
+ for (irow=0; irow < nrows; irow++) {
+ if (rows != NULL) {
+ si = sortind[irow];
+ row = rows[si];
+ } else {
+ si = irow;
+ row = irow;
+ }
+
+ ptr = (char *) PyArray_GETPTR1(array, si);
+ for (col=0; col < ncols; col++) {
+
+ colnum = colnums[col];
+ colptr = hdu->tableptr + (colnum-1);
+
+ repeat = colptr->trepeat;
+ width = colptr->tdatatype == TSTRING ? 1 : colptr->twidth;
+
+ file_pos = hdu->datastart + row*hdu->rowlength + colptr->tbcol;
+
+ if (colptr->tdatatype == TBIT) {
+ if (fits_read_col_bit(fits, colnum, row+1, 1, repeat, ptr, status)) {
+ return 1;
+ }
+ } else {
+ // can just do one status check, since status are inherited.
+ ffmbyt(fits, file_pos, REPORT_EOF, status);
+ if (ffgbytoff(fits, width, repeat, 0, (void*)ptr, status)) {
+ return 1;
+ }
+ }
+
+ gsize = repeat * width;
+ ptr += gsize;
+
+ }
+ }
+
+ return 0;
+}
+
+
+
+// python method for reading specified columns and rows
+static PyObject *
+PyFITSObject_read_columns_as_rec(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0;
+ int hdutype=0;
+ npy_intp ncols=0;
+ npy_int64* colnums=NULL;
+ FITSfile* hdu=NULL;
+
+ int status=0;
+
+ PyObject *columns_obj=NULL, *array_obj=NULL, *rows_obj=NULL, *sortind_obj=NULL;
+
+ npy_intp nrows=0, nsortind=0;
+ npy_int64* rows=NULL, *sortind=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"iOOOO",
+ &hdunum, &columns_obj, &array_obj, &rows_obj, &sortind_obj)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ goto recread_columns_cleanup;
+ }
+
+ if (hdutype == IMAGE_HDU) {
+ PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+ return NULL;
+ }
+
+ colnums = get_int64_from_array((PyArrayObject *) columns_obj, &ncols);
+ if (colnums == NULL) {
+ return NULL;
+ }
+
+ hdu = self->fits->Fptr;
+
+ if (rows_obj == Py_None) {
+ nrows = hdu->numrows;
+ } else {
+ rows = get_int64_from_array((PyArrayObject *) rows_obj, &nrows);
+ if (rows == NULL) {
+ return NULL;
+ }
+ sortind = get_int64_from_array((PyArrayObject *) sortind_obj, &nsortind);
+ if (sortind == NULL) {
+ return NULL;
+ }
+ }
+ if (read_binary_rec_columns(
+ self->fits, ncols, colnums,
+ nrows, rows, sortind, (PyArrayObject *) array_obj, &status)) {
+ goto recread_columns_cleanup;
+ }
+
+recread_columns_cleanup:
+
+ if (status != 0) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+
+
+/*
+ * read specified columns and rows
+ *
+ * Move by offset instead of just groupsize; this allows us to read into a
+ * recarray while skipping some fields, e.g. variable length array fields, to
+ * be read separately.
+ *
+ * If rows is NULL, then nrows are read consecutively.
+ */
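+/*
+ * A hedged offset example: for a record layout like
+ * [('x','f8'), ('y','i4')] where only 'y' is wanted, colnums=[2] and
+ * field_offsets=[8]; each value is written 8 bytes into its record,
+ * leaving the 'x' field untouched.
+ */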
+
+static int read_columns_as_rec_byoffset(
+ fitsfile* fits,
+ npy_intp ncols,
+ const npy_int64* colnums, // columns to read from file
+ const npy_int64* field_offsets, // offsets of corresponding fields within array
+ npy_intp nrows,
+ const npy_int64* rows,
+ const npy_int64* sortind,
+ char* data,
+ npy_intp recsize,
+ int* status) {
+
+ FITSfile* hdu=NULL;
+ tcolumn* colptr=NULL;
+ LONGLONG file_pos=0;
+ npy_intp col=0;
+ npy_int64 colnum=0;
+
+ char* ptr=NULL;
+
+ int get_all_rows=1;
+ npy_intp irow=0;
+ npy_int64 row=0, si=0;
+
+ long groupsize=0; // number of bytes in column
+ long ngroups=1; // number to read, one for row-by-row reading
+ long group_gap=0; // gap between groups, zero since we aren't using it
+
+ if (rows != NULL) {
+ get_all_rows=0;
+ }
+
+ // using struct defs here, could cause problems
+ hdu = fits->Fptr;
+ for (irow=0; irow < nrows; irow++) {
+ if (get_all_rows) {
+ row = irow;
+ si = irow;
+ } else {
+ si = sortind[irow];
+ row = rows[si];
+ }
+ for (col=0; col < ncols; col++) {
+
+ // point to this field in the array, allows for skipping
+ ptr = data + si*recsize + field_offsets[col];
+
+ colnum = colnums[col];
+ colptr = hdu->tableptr + (colnum-1);
+
+ groupsize = get_groupsize(colptr);
+
+ file_pos = hdu->datastart + row*hdu->rowlength + colptr->tbcol;
+
+ // can just do one status check, since status are inherited.
+ ffmbyt(fits, file_pos, REPORT_EOF, status);
+ if (ffgbytoff(fits, groupsize, ngroups, group_gap, (void*) ptr, status)) {
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+
+
+
+
+
+/* python method for reading specified columns and rows, moving by offset in
+ * the array to allow some fields not read.
+ *
+ * columnsObj is the columns in the fits file to read.
+ * offsetsObj is the offsets of the corresponding fields into the array.
+ */
+static PyObject *
+PyFITSObject_read_columns_as_rec_byoffset(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ npy_intp ncols=0;
+ npy_intp noffsets=0;
+ npy_intp nrows=0, nsortind=0;
+ const npy_int64* colnums=NULL;
+ const npy_int64* offsets=NULL;
+ const npy_int64* rows=NULL, *sortind=NULL;
+
+ PyObject* columns_obj=NULL;
+ PyObject* offsets_obj=NULL;
+ PyObject* rows_obj=NULL;
+ PyObject* sortind_obj=NULL;
+
+ PyObject* array_obj=NULL;
+ PyArrayObject *array=NULL;
+ void* data=NULL;
+ npy_intp recsize=0;
+
+ if (!PyArg_ParseTuple(args, (char*)"iOOOOO",
+ &hdunum, &columns_obj, &offsets_obj, &array_obj, &rows_obj, &sortind_obj)) {
+ return NULL;
+ }
+
+ array = (PyArrayObject *) array_obj;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ goto recread_columns_byoffset_cleanup;
+ }
+
+ if (hdutype == IMAGE_HDU) {
+ PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+ return NULL;
+ }
+
+ colnums = (const npy_int64*) get_int64_from_array((PyArrayObject *) columns_obj, &ncols);
+ if (colnums == NULL) {
+ return NULL;
+ }
+ offsets = (const npy_int64*) get_int64_from_array((PyArrayObject *) offsets_obj, &noffsets);
+ if (offsets == NULL) {
+ return NULL;
+ }
+ if (noffsets != ncols) {
+ PyErr_Format(PyExc_ValueError,
+ "%ld columns requested but got %ld offsets",
+ ncols, noffsets);
+ return NULL;
+ }
+
+ if (rows_obj != Py_None) {
+ rows = (const npy_int64*) get_int64_from_array((PyArrayObject *) rows_obj, &nrows);
+ sortind = (const npy_int64*) get_int64_from_array((PyArrayObject *) sortind_obj, &nsortind);
+ } else {
+ nrows = PyArray_SIZE(array);
+ }
+
+ data = PyArray_DATA(array);
+ recsize = PyArray_ITEMSIZE(array);
+ if (read_columns_as_rec_byoffset(
+ self->fits,
+ ncols, colnums, offsets,
+ nrows,
+ rows,
+ sortind,
+ (char*) data,
+ recsize,
+ &status) > 0) {
+ goto recread_columns_byoffset_cleanup;
+ }
+
+recread_columns_byoffset_cleanup:
+
+ if (status != 0) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+
+
+// read specified rows, all columns
+static int read_rec_bytes_byrow(
+ fitsfile* fits,
+ npy_intp nrows, npy_int64* rows, npy_int64* sortind,
+ void* vdata, int* status) {
+
+ FITSfile* hdu=NULL;
+
+ npy_intp irow=0, si=0;
+ LONGLONG firstrow=1;
+ LONGLONG firstchar=1;
+
+ // use char for pointer arith. It's actually ok to use void as char but
+ // this is just in case.
+ unsigned char* ptr, *data;
+
+ // using struct defs here, could cause problems
+ hdu = fits->Fptr;
+ // ptr = (unsigned char*) data;
+ data = (unsigned char*) vdata;
+
+ for (irow=0; irow < nrows; irow++) {
+
+ si = sortind[irow];
+
+ // Input is zero-offset
+ firstrow = 1 + (LONGLONG) rows[si];
+
+ ptr = data + si * hdu->rowlength;
+
+ if (fits_read_tblbytes(fits, firstrow, firstchar, hdu->rowlength, ptr, status)) {
+ return 1;
+ }
+
+ // ptr += hdu->rowlength;
+ }
+
+ return 0;
+}
+// read specified rows, all columns
+/*
+static int read_rec_bytes_byrowold(
+ fitsfile* fits,
+ npy_intp nrows, npy_int64* rows,
+ void* data, int* status) {
+ FITSfile* hdu=NULL;
+ LONGLONG file_pos=0;
+
+ npy_intp irow=0;
+ npy_int64 row=0;
+
+ // use char for pointer arith. It's actually ok to use void as char but
+ // this is just in case.
+ char* ptr;
+
+ long ngroups=1; // number to read, one for row-by-row reading
+ long offset=0; // gap between groups, not stride. zero since we aren't using it
+
+ // using struct defs here, could cause problems
+ hdu = fits->Fptr;
+ ptr = (char*) data;
+
+ for (irow=0; irow<nrows; irow++) {
+ row = rows[irow];
+ file_pos = hdu->datastart + row*hdu->rowlength;
+
+ // can just do one status check, since status are inherited.
+ ffmbyt(fits, file_pos, REPORT_EOF, status);
+ if (ffgbytoff(fits, hdu->rowlength, ngroups, offset, (void*) ptr, status)) {
+ return 1;
+ }
+ ptr += hdu->rowlength;
+ }
+
+ return 0;
+}
+*/
+
+
+// python method to read all columns but subset of rows
+static PyObject *
+PyFITSObject_read_rows_as_rec(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0;
+ int hdutype=0;
+
+ int status=0;
+ void* data=NULL;
+
+ PyObject *array_obj=NULL, *rows_obj=NULL, *sortind_obj=NULL;
+ npy_intp nrows=0, nsortind=0;
+ npy_int64* rows=NULL;
+ npy_int64* sortind=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"iOOO", &hdunum, &array_obj, &rows_obj, &sortind_obj)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ goto recread_byrow_cleanup;
+ }
+
+ if (hdutype == IMAGE_HDU) {
+ PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+ return NULL;
+ }
+
+ data = PyArray_DATA((PyArrayObject *) array_obj);
+
+ rows = get_int64_from_array((PyArrayObject *) rows_obj, &nrows);
+ if (rows == NULL) {
+ return NULL;
+ }
+ sortind = get_int64_from_array((PyArrayObject *) sortind_obj, &nsortind);
+ if (sortind == NULL) {
+ return NULL;
+ }
+
+ if (read_rec_bytes_byrow(self->fits, nrows, rows, sortind, data, &status)) {
+ goto recread_byrow_cleanup;
+ }
+
+recread_byrow_cleanup:
+
+ if (status != 0) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+
+
+
+ /* Read the range of rows, 1-offset. It is assumed the data match the table
+ * perfectly.
+ */
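+/*
+ * A hedged sketch of the intended use: firstrow=1 with nrows equal to
+ * the table length reads the entire data area in a single
+ * fits_read_tblbytes call; the python layer chooses the range.
+ */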
+
+static int read_rec_range(fitsfile* fits, LONGLONG firstrow, LONGLONG nrows, void* data, int* status) {
+ // can also use this for reading row ranges
+ LONGLONG firstchar=1;
+ LONGLONG nchars=0;
+
+ nchars = (fits->Fptr)->rowlength*nrows;
+
+ if (fits_read_tblbytes(fits, firstrow, firstchar, nchars, (unsigned char*) data, status)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+
+
+
+/* here rows are 1-offset, unlike when reading a specific subset of rows */
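+/*
+ * A hedged sketch: read_as_rec(hdunum, 11, 20, arr10) reads rows 11..20
+ * inclusive into a 10 element recarray.
+ */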
+static PyObject *
+PyFITSObject_read_as_rec(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0;
+ int hdutype=0;
+
+ int status=0;
+ PyObject* array_obj=NULL;
+ void* data=NULL;
+
+ PY_LONG_LONG firstrow=0;
+ PY_LONG_LONG lastrow=0;
+ PY_LONG_LONG nrows=0;
+
+ if (!PyArg_ParseTuple(args, (char*)"iLLO", &hdunum, &firstrow, &lastrow, &array_obj)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ goto recread_asrec_cleanup;
+ }
+
+ if (hdutype == IMAGE_HDU) {
+ PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+ return NULL;
+ }
+
+ data = PyArray_DATA((PyArrayObject *) array_obj);
+
+ nrows=lastrow-firstrow+1;
+ if (read_rec_range(self->fits, (LONGLONG)firstrow, (LONGLONG)nrows, data, &status)) {
+ goto recread_asrec_cleanup;
+ }
+
+recread_asrec_cleanup:
+
+ if (status != 0) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+
+// read an n-dimensional "image" into the input array. Only minimal checking
+// of the input array is done.
+// Note numpy allows a maximum of 32 dimensions
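+//
+// A hedged python-side sketch: the caller allocates the array to match
+// the on-disk image, e.g. for a 2-d 'f4' image
+//
+//   arr = numpy.empty((ny, nx), dtype='f4')
+//   fits.read_image(hdunum, arr)
+//
+// the size check below rejects a mismatched allocation.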
+static PyObject *
+PyFITSObject_read_image(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0;
+ int hdutype=0;
+ int status=0;
+ PyObject* array_obj=NULL;
+ PyArrayObject* array=NULL;
+ void* data=NULL;
+ int npy_dtype=0;
+ int dummy=0, fits_read_dtype=0;
+
+ int maxdim=NUMPY_MAX_DIMS; // numpy maximum
+ int datatype=0; // type info for axis
+ int naxis=0; // number of axes
+ int i=0;
+ LONGLONG naxes[NUMPY_MAX_DIMS]; // size of each axis
+ LONGLONG firstpixels[NUMPY_MAX_DIMS];
+ LONGLONG size=0;
+ npy_intp arrsize=0;
+
+ int anynul=0;
+
+ if (!PyArg_ParseTuple(args, (char*)"iO", &hdunum, &array_obj)) {
+ return NULL;
+ }
+
+ array = (PyArrayObject *) array_obj;
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_get_img_paramll(self->fits, maxdim, &datatype, &naxis,
+ naxes, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // make sure dims match
+ size = naxes[0];
+ for (i=1; i< naxis; i++) {
+ size *= naxes[i];
+ }
+ arrsize = PyArray_SIZE(array);
+ data = PyArray_DATA(array);
+
+ if (size != arrsize) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Input array size is %ld but on disk array size is %lld",
+ arrsize, size);
+ return NULL;
+ }
+
+ npy_dtype = PyArray_TYPE(array);
+ if (npy_to_fits_image_types(npy_dtype, &dummy, &fits_read_dtype)) {
+ return NULL;
+ }
+
+ for (i=0; i<naxis; i++) {
+ firstpixels[i] = 1;
+ }
+ if (fits_read_pixll(self->fits, fits_read_dtype, firstpixels, size,
+ 0, data, &anynul, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+PyFITSObject_read_raw(struct PyFITSObject* self, PyObject* args) {
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ //fitsfile* fits = self->fits;
+ FITSfile* FITS = self->fits->Fptr;
+ int status = 0;
+ char* filedata;
+ LONGLONG sz;
+ LONGLONG io_pos;
+ PyObject *stringobj;
+
+ // Flush (close & reopen HDU) to make everything consistent
+ ffflus(self->fits, &status);
+ if (status) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Failed to flush FITS file data to disk; CFITSIO code %i",
+ status);
+ return NULL;
+ }
+ // Allocate buffer for string
+ sz = FITS->filesize;
+    // Create python string object of requested size, uninitialized
+ stringobj = PyBytes_FromStringAndSize(NULL, sz);
+ if (!stringobj) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Failed to allocate python string object to hold FITS file data: %i bytes",
+ (int)sz);
+ return NULL;
+ }
+ // Grab pointer to the memory buffer of the python string object
+ filedata = PyBytes_AsString(stringobj);
+ if (!filedata) {
+ Py_DECREF(stringobj);
+ return NULL;
+ }
+ // Remember old file position
+ io_pos = FITS->io_pos;
+ // Seek to beginning of file
+ if (ffseek(FITS, 0)) {
+ Py_DECREF(stringobj);
+ PyErr_Format(PyExc_RuntimeError,
+ "Failed to seek to beginning of FITS file");
+ return NULL;
+ }
+ // Read into filedata
+ if (ffread(FITS, sz, filedata, &status)) {
+ Py_DECREF(stringobj);
+ PyErr_Format(PyExc_RuntimeError,
+ "Failed to read file data into memory: CFITSIO code %i",
+ status);
+ return NULL;
+ }
+ // Seek back to where we were
+ if (ffseek(FITS, io_pos)) {
+ Py_DECREF(stringobj);
+ PyErr_Format(PyExc_RuntimeError,
+ "Failed to seek back to original FITS file position");
+ return NULL;
+ }
+ return stringobj;
+}
+
+static int get_long_slices(PyArrayObject* fpix_arr,
+ PyArrayObject* lpix_arr,
+ PyArrayObject* step_arr,
+ long** fpix,
+ long** lpix,
+ long** step) {
+
+ int i=0;
+ npy_int64* ptr=NULL;
+ npy_intp fsize=0, lsize=0, ssize=0;
+
+ fsize=PyArray_SIZE(fpix_arr);
+ lsize=PyArray_SIZE(lpix_arr);
+ ssize=PyArray_SIZE(step_arr);
+
+ if (lsize != fsize || ssize != fsize) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "start/end/step must be same len");
+ return 1;
+ }
+
+    *fpix=calloc(fsize, sizeof(long));
+    *lpix=calloc(fsize, sizeof(long));
+    *step=calloc(fsize, sizeof(long));
+    if (*fpix==NULL || *lpix==NULL || *step==NULL) {
+        free(*fpix); free(*lpix); free(*step);
+        PyErr_SetString(PyExc_MemoryError,
+                        "could not allocate slice arrays");
+        return 1;
+    }
+
+ for (i=0;i<fsize;i++) {
+ ptr=PyArray_GETPTR1(fpix_arr, i);
+ (*fpix)[i] = *ptr;
+ ptr=PyArray_GETPTR1(lpix_arr, i);
+ (*lpix)[i] = *ptr;
+ ptr=PyArray_GETPTR1(step_arr, i);
+ (*step)[i] = *ptr;
+ }
+ return 0;
+}
+
+// read an n-dimensional "image" into the input array. Only minimal checking
+// of the input array is done.
+static PyObject *
+PyFITSObject_read_image_slice(struct PyFITSObject* self, PyObject* args) {
+ int hdunum=0;
+ int hdutype=0;
+ int status=0;
+ PyObject* fpix_obj=NULL;
+ PyObject* lpix_obj=NULL;
+ PyObject* step_obj=NULL;
+ int ignore_scaling=FALSE;
+ PyObject* array=NULL;
+ long* fpix=NULL;
+ long* lpix=NULL;
+ long* step=NULL;
+ void* data=NULL;
+ int npy_dtype=0;
+ int dummy=0, fits_read_dtype=0;
+
+ int anynul=0;
+
+ if (!PyArg_ParseTuple(args, (char*)"iOOOiO",
+ &hdunum, &fpix_obj, &lpix_obj, &step_obj, &ignore_scaling,
+ &array)) {
+ return NULL;
+ }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (ignore_scaling == TRUE
+            && fits_set_bscale(self->fits, 1.0, 0.0, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+ if (get_long_slices(
+ (PyArrayObject *) fpix_obj,
+ (PyArrayObject *) lpix_obj,
+ (PyArrayObject *) step_obj,
+ &fpix,&lpix,&step)) {
+ return NULL;
+ }
+ data = PyArray_DATA((PyArrayObject *) array);
+
+ npy_dtype = PyArray_TYPE((PyArrayObject *) array);
+ npy_to_fits_image_types(npy_dtype, &dummy, &fits_read_dtype);
+
+ if (fits_read_subset(self->fits, fits_read_dtype, fpix, lpix, step,
+ 0, data, &anynul, &status)) {
+ set_ioerr_string_from_status(status);
+ goto read_image_slice_cleanup;
+ }
+
+read_image_slice_cleanup:
+ free(fpix);
+ free(lpix);
+ free(step);
+
+ if (status != 0) {
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+
+
+static int hierarch_is_string(const char* card)
+{
+ int i=0, is_string_value=1;
+
+ for (i=0; i<78; i++) {
+        if (card[i] == '=') {
+            // we found the equals; if the value is a string we
+            // know exactly where the opening quote must be
+            if (card[i+2] == '\'') {
+                is_string_value = 1;
+            } else {
+                is_string_value = 0;
+            }
+            // stop at the first equals; a later '=' inside a string
+            // value must not override the result
+            break;
+        }
+ }
+ return is_string_value;
+}
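+
+/* Illustration (hypothetical card, not taken from any file): for
+
+       HIERARCH ESO INS MODE = 'IMAGING '
+
+   the opening quote of a string value sits exactly two characters past
+   the '=', which is the check made above. */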
+
+// read the entire header as list of dicts with name,value,comment and full
+// card
+static PyObject *
+PyFITSObject_read_header(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+ int lcont=0, lcomm=0, ls=0;
+ int tocomp=0;
+ int is_comment_or_history=0, is_blank_key=0;
+ char *longstr=NULL;
+
+ char keyname[FLEN_KEYWORD];
+ char value[FLEN_VALUE];
+ char comment[FLEN_COMMENT];
+ char scomment[FLEN_COMMENT];
+ char card[FLEN_CARD];
+ long is_string_value=0;
+
+ LONGLONG lval=0;
+ double dval=0;
+
+ int nkeys=0, morekeys=0, i=0;
+ int has_equals=0, has_quote=0, was_converted=0, is_hierarch=0;
+
+ PyObject* list=NULL;
+ PyObject* dict=NULL; // to hold the dict for each record
+
+ lcont=strlen("CONTINUE");
+ lcomm=strlen("COMMENT");
+
+ if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+ return NULL;
+ }
+
+ if (self->fits == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+ return NULL;
+ }
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_get_hdrspace(self->fits, &nkeys, &morekeys, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ list=PyList_New(0);
+ for (i=0; i<nkeys; i++) {
+
+ // the full card
+ if (fits_read_record(self->fits, i+1, card, &status)) {
+ Py_XDECREF(list);
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ // this just returns the character string stored in the header; we
+ // can eval in python
+ if (fits_read_keyn(self->fits, i+1, keyname, value, scomment, &status)) {
+ Py_XDECREF(list);
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ ls = strlen(keyname);
+ tocomp = (ls < lcont) ? ls : lcont;
+
+ is_blank_key = 0;
+ is_hierarch = 0;
+ if (ls == 0) {
+ is_blank_key = 1;
+ } else {
+
+ // skip CONTINUE, we already read the data
+ if (strncmp(keyname,"CONTINUE",tocomp)==0) {
+ continue;
+ }
+
+ if (strncmp(keyname, "COMMENT", tocomp) ==0
+ || strncmp(keyname, "HISTORY", tocomp )==0) {
+ is_comment_or_history = 1;
+
+ } else {
+ is_comment_or_history = 0;
+
+ if (fits_read_key_longstr(self->fits, keyname, &longstr, comment, &status)) {
+ Py_XDECREF(list);
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (strncmp(card,"HIERARCH",8)==0) {
+ is_hierarch = 1;
+ if (hierarch_is_string(card)) {
+ is_string_value=1;
+ } else {
+ is_string_value=0;
+ }
+ } else {
+ has_equals = (card[8] == '=') ? 1 : 0;
+ has_quote = (card[10] == '\'') ? 1 : 0;
+ if (has_equals && has_quote) {
+ is_string_value=1;
+ } else {
+ is_string_value=0;
+ }
+ }
+ }
+ }
+
+ dict = PyDict_New();
+ convert_to_ascii(card);
+ add_string_to_dict(dict, "card_string", card);
+
+ if (is_blank_key) {
+ add_none_to_dict(dict, "name");
+ add_string_to_dict(dict, "value", "");
+ convert_to_ascii(scomment);
+ add_string_to_dict(dict, "comment", scomment);
+
+ } else if (is_comment_or_history) {
+ // comment or history
+ convert_to_ascii(scomment);
+ add_string_to_dict(dict, "name", keyname);
+ add_string_to_dict(dict, "value", scomment);
+ add_string_to_dict(dict, "comment", scomment);
+
+ } else {
+
+ if (is_hierarch) {
+ // if a key is hierarch, then any ascii character is allowed except
+ // *, ? and #. Thus we convert any of those (and any chars we find
+ // that don't correspond to something written, ascii <= 32 or 127)
+ // to an underscore
+ // if the key is converted, then we cannot parse it further with
+ // cfitsio
+ was_converted = convert_keyword_to_allowed_ascii_template_and_nonascii_only(keyname);
+ } else {
+                // for non-hierarch keys, we cannot use the cfitsio functions
+                // if any character besides those in the fits conventions
+                // (A-Z,a-z,0-9,_,-) is present. Thus we flag those and store
+                // their values as a string if this happens.
+ was_converted = has_invalid_keyword_chars(keyname);
+
+                // in order to actually store the key in the python dict, we
+                // have to cut out any non-ascii chars. We additionally
+                // convert the template chars so that the fits data we make
+                // can be written back without error. Note that the check by
+                // has_invalid_keyword_chars is more stringent than the
+                // checks done here, so if any conversion is done it has
+                // already been flagged above.
+ convert_keyword_to_allowed_ascii_template_and_nonascii_only(keyname);
+ }
+ add_string_to_dict(dict,"name",keyname);
+ convert_to_ascii(comment);
+ add_string_to_dict(dict,"comment",comment);
+
+ // if not a comment but empty value, put in None
+ if (!is_string_value && 0==strlen(longstr)) {
+
+ add_none_to_dict(dict, "value");
+
+ } else {
+
+                // if it is a string we just store it.
+ if (is_string_value) {
+ convert_to_ascii(longstr);
+ add_string_to_dict(dict,"value",longstr);
+ } else if ( longstr[0]=='T' ) {
+ add_true_to_dict(dict, "value");
+ } else if (longstr[0]=='F') {
+ add_false_to_dict(dict, "value");
+ } else if (was_converted) {
+ // if we had to convert bad characters in the keyword name
+ // we can't attempt to get a numerical value using
+ // fits_read_key because some characters in a keyword name
+ // cause a seg fault
+ convert_to_ascii(longstr);
+ add_string_to_dict(dict,"value",longstr);
+ } else if (
+ (strchr(longstr,'.') != NULL)
+ || (strchr(longstr,'E') != NULL)
+ || (strchr(longstr,'e') != NULL) ) {
+ // we found a floating point value
+ fits_read_key(self->fits, TDOUBLE, keyname, &dval, comment, &status);
+ add_double_to_dict(dict,"value",dval);
+ } else {
+
+ // we might have found an integer
+ if (fits_read_key(self->fits,
+ TLONGLONG,
+ keyname,
+ &lval,
+ comment,
+ &status)) {
+
+ // something non standard, just store it as a string
+ convert_to_ascii(longstr);
+ add_string_to_dict(dict,"value",longstr);
+ status=0;
+
+ } else {
+ add_long_long_to_dict(dict,"value",(long long)lval);
+ }
+ }
+
+ }
+ }
+
+ free(longstr); longstr=NULL;
+
+ PyList_Append(list, dict);
+ Py_XDECREF(dict);
+
+ }
+
+ return list;
+}
+
+static PyObject *
+PyFITSObject_write_checksum(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ unsigned long datasum=0;
+ unsigned long hdusum=0;
+
+ PyObject* dict=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+ return NULL;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_write_chksum(self->fits, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+ if (fits_get_chksum(self->fits, &datasum, &hdusum, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ dict=PyDict_New();
+ add_long_long_to_dict(dict,"datasum",(long long)datasum);
+ add_long_long_to_dict(dict,"hdusum",(long long)hdusum);
+
+ return dict;
+}
+
+static PyObject *
+PyFITSObject_verify_checksum(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+
+ int dataok=0, hduok=0;
+
+ PyObject* dict=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+ return NULL;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ if (fits_verify_chksum(self->fits, &dataok, &hduok, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ dict=PyDict_New();
+ add_long_to_dict(dict,"dataok",(long)dataok);
+ add_long_to_dict(dict,"hduok",(long)hduok);
+
+ return dict;
+}
+
+
+
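+/* Evaluate a cfitsio row-selection expression over a range of rows,
+   returning the 0-offset indices (relative to firstrow) where it is
+   true. For illustration, an expression might look like
+   "x > 3.0 && y < 10" (hypothetical column names). */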
+static PyObject *
+PyFITSObject_where(struct PyFITSObject* self, PyObject* args) {
+ int status=0;
+ int hdunum=0;
+ int hdutype=0;
+ char* expression=NULL;
+
+ long firstrow;
+ long nrows;
+ long ngood=0;
+ char* row_status=NULL;
+
+
+ // Indices of rows for which expression is true
+ PyObject* indices_obj=NULL;
+ int ndim=1;
+ npy_intp dims[1];
+ npy_intp* data=NULL;
+ long i=0;
+
+
+ if (!PyArg_ParseTuple(args, (char*)"isll", &hdunum, &expression,
+ &firstrow, &nrows)) {
+ return NULL;
+ }
+
+ if (firstrow < 1 || nrows < 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "firstrow and nrows must be positive integers");
+ return NULL;
+ }
+
+ if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ row_status = malloc(nrows*sizeof(char));
+ if (row_status==NULL) {
+ PyErr_SetString(PyExc_MemoryError, "Could not allocate row_status array");
+ return NULL;
+ }
+
+ if (fits_find_rows(self->fits, expression, firstrow, nrows, &ngood, row_status, &status)) {
+ set_ioerr_string_from_status(status);
+ goto where_function_cleanup;
+ }
+
+ dims[0] = ngood;
+ indices_obj = PyArray_EMPTY(ndim, dims, NPY_INTP, 0);
+ if (indices_obj == NULL) {
+ PyErr_SetString(PyExc_MemoryError, "Could not allocate index array");
+ goto where_function_cleanup;
+ }
+
+ if (ngood > 0) {
+ data = PyArray_DATA((PyArrayObject *) indices_obj);
+
+ for (i=0; i<nrows; i++) {
+ if (row_status[i]) {
+ *data = (npy_intp) i;
+ data++;
+ }
+ }
+ }
+where_function_cleanup:
+ free(row_status);
+ return indices_obj;
+}
+
+// generic functions, not tied to an object
+
+static PyObject *
+PyFITS_cfitsio_version(void) {
+ float version=0;
+ fits_get_version(&version);
+ return PyFloat_FromDouble((double)version);
+}
+
+static PyObject *
+PyFITS_cfitsio_use_standard_strings(void) {
+ if ( fits_use_standard_strings() ) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+
+
+/*
+
+'C', 'L', 'I', 'F' 'X'
+character string, logical, integer, floating point, complex
+
+*/
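+
+/* For illustration, hypothetical value strings mapping to each code:
+
+       'M31'        ->  'C'  character string
+       T            ->  'L'  logical
+       42           ->  'I'  integer
+       1.5E2        ->  'F'  floating point
+       (1.0, 2.0)   ->  'X'  complex
+*/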
+
+static PyObject *
+PyFITS_get_keytype(PyObject* self, PyObject* args) {
+
+ int status=0;
+ char* card=NULL;
+ char dtype[2]={0};
+
+ if (!PyArg_ParseTuple(args, (char*)"s", &card)) {
+ return NULL;
+ }
+
+
+ if (fits_get_keytype(card, dtype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ } else {
+ return Py_BuildValue("s", dtype);
+ }
+}
+static PyObject *
+PyFITS_get_key_meta(PyObject* self, PyObject* args) {
+
+ int status=0;
+ char* card=NULL;
+ char dtype[2]={0};
+ int keyclass=0;
+
+ if (!PyArg_ParseTuple(args, (char*)"s", &card)) {
+ return NULL;
+ }
+
+
+ keyclass=fits_get_keyclass(card);
+
+ if (fits_get_keytype(card, dtype, &status)) {
+ set_ioerr_string_from_status(status);
+ return NULL;
+ }
+
+ return Py_BuildValue("is", keyclass, dtype);
+
+}
+
+/*
+
+ note the special first four comment fields will not be called comment but
+ structural! That will cause an exception to be raised, so the card should
+ be checked before calling this function
+
+*/
+
+static PyObject *
+PyFITS_parse_card(PyObject* self, PyObject* args) {
+
+ int status=0;
+ char name[FLEN_VALUE]={0};
+ char value[FLEN_VALUE]={0};
+ char comment[FLEN_COMMENT]={0};
+ int keylen=0;
+ int keyclass=0;
+ int is_undefined=0;
+
+ char* card=NULL;
+ char dtype[2]={0};
+ PyObject* output=NULL;
+
+ if (!PyArg_ParseTuple(args, (char*)"s", &card)) {
+ goto bail;
+ }
+
+ keyclass=fits_get_keyclass(card);
+
+ // only proceed if not comment or history, but note the special first four
+ // comment fields will not be called comment but structural! That will
+ // cause an exception to be raised, so the card should be checked before
+ // calling this function
+
+ if (keyclass != TYP_COMM_KEY && keyclass != TYP_CONT_KEY) {
+
+ if (fits_get_keyname(card, name, &keylen, &status)) {
+ set_ioerr_string_from_status(status);
+ goto bail;
+ }
+ if (fits_parse_value(card, value, comment, &status)) {
+ set_ioerr_string_from_status(status);
+ goto bail;
+ }
+ if (fits_get_keytype(value, dtype, &status)) {
+
+ if (status == VALUE_UNDEFINED) {
+ is_undefined=1;
+ status=0;
+ } else {
+ set_ioerr_string_from_status(status);
+ goto bail;
+ }
+ }
+ }
+
+bail:
+ if (status != 0) {
+ return NULL;
+ }
+
+ if (is_undefined) {
+ output=Py_BuildValue("isss", keyclass, name, dtype, comment);
+ } else {
+ output=Py_BuildValue("issss", keyclass, name, value, dtype, comment);
+ }
+ return output;
+}
+
+
+
+static PyMethodDef PyFITSObject_methods[] = {
+ {"filename", (PyCFunction)PyFITSObject_filename, METH_VARARGS, "filename\n\nReturn the name of the file."},
+
+ {"where", (PyCFunction)PyFITSObject_where, METH_VARARGS, "where\n\nReturn an index array where the input expression evaluates to true."},
+
+ {"movabs_hdu", (PyCFunction)PyFITSObject_movabs_hdu, METH_VARARGS, "movabs_hdu\n\nMove to the specified HDU."},
+ {"movnam_hdu", (PyCFunction)PyFITSObject_movnam_hdu, METH_VARARGS, "movnam_hdu\n\nMove to the specified HDU by name and return the hdu number."},
+
+ {"get_hdu_name_version", (PyCFunction)PyFITSObject_get_hdu_name_version, METH_VARARGS, "get_hdu_name_version\n\nReturn a tuple (extname,extvers)."},
+ {"get_hdu_info", (PyCFunction)PyFITSObject_get_hdu_info, METH_VARARGS, "get_hdu_info\n\nReturn a dict with info about the specified HDU."},
+ {"read_raw", (PyCFunction)PyFITSObject_read_raw, METH_NOARGS, "read_raw\n\nRead the entire raw contents of the FITS file, returning a python string."},
+ {"read_image", (PyCFunction)PyFITSObject_read_image, METH_VARARGS, "read_image\n\nRead the entire n-dimensional image array. No checking of array is done."},
+ {"read_image_slice", (PyCFunction)PyFITSObject_read_image_slice, METH_VARARGS, "read_image_slice\n\nRead an image slice."},
+ {"read_column", (PyCFunction)PyFITSObject_read_column, METH_VARARGS, "read_column\n\nRead the column into the input array. No checking of array is done."},
+ {"read_var_column_as_list", (PyCFunction)PyFITSObject_read_var_column_as_list, METH_VARARGS, "read_var_column_as_list\n\nRead the variable length column as a list of arrays."},
+ {"read_columns_as_rec", (PyCFunction)PyFITSObject_read_columns_as_rec, METH_VARARGS, "read_columns_as_rec\n\nRead the specified columns into the input rec array. No checking of array is done."},
+ {"read_columns_as_rec_byoffset", (PyCFunction)PyFITSObject_read_columns_as_rec_byoffset, METH_VARARGS, "read_columns_as_rec_byoffset\n\nRead the specified columns into the input rec array at the specified offsets. No checking of array is done."},
+ {"read_rows_as_rec", (PyCFunction)PyFITSObject_read_rows_as_rec, METH_VARARGS, "read_rows_as_rec\n\nRead the subset of rows into the input rec array. No checking of array is done."},
+ {"read_as_rec", (PyCFunction)PyFITSObject_read_as_rec, METH_VARARGS, "read_as_rec\n\nRead a set of rows into the input rec array. No significant checking of array is done."},
+ {"read_header", (PyCFunction)PyFITSObject_read_header, METH_VARARGS | METH_VARARGS, "read_header\n\nRead the entire header as a list of dictionaries."},
+
+ {"create_image_hdu", (PyCFunction)PyFITSObject_create_image_hdu, METH_VARARGS | METH_KEYWORDS, "create_image_hdu\n\nWrite the input image to a new extension."},
+ {"create_table_hdu", (PyCFunction)PyFITSObject_create_table_hdu, METH_VARARGS | METH_KEYWORDS, "create_table_hdu\n\nCreate a new table with the input parameters."},
+ {"insert_col", (PyCFunction)PyFITSObject_insert_col, METH_VARARGS | METH_KEYWORDS, "insert_col\n\nInsert a new column."},
+
+ {"write_checksum", (PyCFunction)PyFITSObject_write_checksum, METH_VARARGS, "write_checksum\n\nCompute and write the checksums into the header."},
+ {"verify_checksum", (PyCFunction)PyFITSObject_verify_checksum, METH_VARARGS, "verify_checksum\n\nReturn a dict with dataok and hduok."},
+
+ {"reshape_image", (PyCFunction)PyFITSObject_reshape_image, METH_VARARGS, "reshape_image\n\nReshape the image."},
+ {"write_image", (PyCFunction)PyFITSObject_write_image, METH_VARARGS, "write_image\n\nWrite the input image to a new extension."},
+ //{"write_column", (PyCFunction)PyFITSObject_write_column, METH_VARARGS | METH_KEYWORDS, "write_column\n\nWrite a column into the specified hdu."},
+ {"write_columns", (PyCFunction)PyFITSObject_write_columns, METH_VARARGS | METH_KEYWORDS, "write_columns\n\nWrite columns into the specified hdu."},
+ {"write_var_column", (PyCFunction)PyFITSObject_write_var_column, METH_VARARGS | METH_KEYWORDS, "write_var_column\n\nWrite a variable length column into the specified hdu from an object array."},
+ {"write_record", (PyCFunction)PyFITSObject_write_record, METH_VARARGS, "write_record\n\nWrite a header card."},
+ {"write_string_key", (PyCFunction)PyFITSObject_write_string_key, METH_VARARGS, "write_string_key\n\nWrite a string key into the specified HDU."},
+ {"write_double_key", (PyCFunction)PyFITSObject_write_double_key, METH_VARARGS, "write_double_key\n\nWrite a double key into the specified HDU."},
+
+ {"write_long_long_key", (PyCFunction)PyFITSObject_write_long_long_key, METH_VARARGS, "write_long_long_key\n\nWrite a long long key into the specified HDU."},
+ {"write_logical_key", (PyCFunction)PyFITSObject_write_logical_key, METH_VARARGS, "write_logical_key\n\nWrite a logical key into the specified HDU."},
+
+ {"write_comment", (PyCFunction)PyFITSObject_write_comment, METH_VARARGS, "write_comment\n\nWrite a comment into the header of the specified HDU."},
+ {"write_history", (PyCFunction)PyFITSObject_write_history, METH_VARARGS, "write_history\n\nWrite history into the header of the specified HDU."},
+ {"write_continue", (PyCFunction)PyFITSObject_write_continue, METH_VARARGS, "write_continue\n\nWrite contineu into the header of the specified HDU."},
+
+ {"write_undefined_key", (PyCFunction)PyFITSObject_write_undefined_key, METH_VARARGS, "write_undefined_key\n\nWrite a key without a value field into the header of the specified HDU."},
+
+ {"insert_rows", (PyCFunction)PyFITSObject_insert_rows, METH_VARARGS, "Insert blank rows"},
+
+ {"delete_row_range", (PyCFunction)PyFITSObject_delete_row_range, METH_VARARGS, "Delete a range of rows"},
+ {"delete_rows", (PyCFunction)PyFITSObject_delete_rows, METH_VARARGS, "Delete a set of rows"},
+
+ {"close", (PyCFunction)PyFITSObject_close, METH_VARARGS, "close\n\nClose the fits file."},
+ {NULL} /* Sentinel */
+};
+
+static PyTypeObject PyFITSType = {
+#if PY_MAJOR_VERSION >= 3
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+#endif
+ "_fitsio.FITS", /*tp_name*/
+ sizeof(struct PyFITSObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ (destructor)PyFITSObject_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ //0, /*tp_repr*/
+ (reprfunc)PyFITSObject_repr, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ "FITSIO Class", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ PyFITSObject_methods, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ //0, /* tp_init */
+ (initproc)PyFITSObject_init, /* tp_init */
+ 0, /* tp_alloc */
+ //PyFITSObject_new, /* tp_new */
+ PyType_GenericNew, /* tp_new */
+};
+
+
+static PyMethodDef fitstype_methods[] = {
+ {"cfitsio_version", (PyCFunction)PyFITS_cfitsio_version, METH_NOARGS, "cfitsio_version\n\nReturn the cfitsio version."},
+ {"cfitsio_use_standard_strings", (PyCFunction)PyFITS_cfitsio_use_standard_strings, METH_NOARGS, "cfitsio_use_standard_strings\n\nReturn True if using string code that matches the FITS standard."},
+ {"parse_card", (PyCFunction)PyFITS_parse_card, METH_VARARGS, "parse_card\n\nparse the card to get the key name, value (as a string), data type and comment."},
+ {"get_keytype", (PyCFunction)PyFITS_get_keytype, METH_VARARGS, "get_keytype\n\nparse the card to get the key type."},
+ {"get_key_meta", (PyCFunction)PyFITS_get_key_meta, METH_VARARGS, "get_key_meta\n\nparse the card to get key metadata (keyclass,dtype)."},
+ {NULL} /* Sentinel */
+};
+
+#if PY_MAJOR_VERSION >= 3
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_fitsio_wrap", /* m_name */
+ "Defines the FITS class and some methods", /* m_doc */
+ -1, /* m_size */
+ fitstype_methods, /* m_methods */
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL, /* m_free */
+ };
+#endif
+
+
+#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
+#define PyMODINIT_FUNC void
+#endif
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+PyInit__fitsio_wrap(void)
+#else
+init_fitsio_wrap(void)
+#endif
+{
+ PyObject* m;
+
+ PyFITSType.tp_new = PyType_GenericNew;
+
+#if PY_MAJOR_VERSION >= 3
+ if (PyType_Ready(&PyFITSType) < 0) {
+ return NULL;
+ }
+ m = PyModule_Create(&moduledef);
+ if (m==NULL) {
+ return NULL;
+ }
+
+#else
+ if (PyType_Ready(&PyFITSType) < 0) {
+ return;
+ }
+ m = Py_InitModule3("_fitsio_wrap", fitstype_methods, "Define FITS type and methods.");
+ if (m==NULL) {
+ return;
+ }
+#endif
+
+ Py_INCREF(&PyFITSType);
+ PyModule_AddObject(m, "FITS", (PyObject *)&PyFITSType);
+
+ import_array();
+#if PY_MAJOR_VERSION >= 3
+ return m;
+#endif
+}
--- /dev/null
+"""
+fitslib, part of the fitsio package.
+
+See the main docs at https://github.com/esheldon/fitsio
+
+ Copyright (C) 2011 Erin Sheldon, BNL. erin dot sheldon at gmail dot com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+from __future__ import with_statement, print_function
+import os
+import numpy
+
+from . import _fitsio_wrap
+from .util import IS_PY3, mks, array_to_native, isstring, copy_if_needed
+from .header import FITSHDR
+from .hdu import (
+ ANY_HDU, IMAGE_HDU, BINARY_TBL, ASCII_TBL,
+ ImageHDU, AsciiTableHDU, TableHDU,
+ _table_npy2fits_form, _npy2fits, _hdu_type_map)
+
+from .fits_exceptions import FITSFormatError
+
+# for python3 compat
+if IS_PY3:
+ xrange = range
+
+
+READONLY = 0
+READWRITE = 1
+
+NOCOMPRESS = 0
+RICE_1 = 11
+GZIP_1 = 21
+GZIP_2 = 22
+PLIO_1 = 31
+HCOMPRESS_1 = 41
+
+NO_DITHER = -1
+SUBTRACTIVE_DITHER_1 = 1
+SUBTRACTIVE_DITHER_2 = 2
+
+# defaults follow fpack
+DEFAULT_QLEVEL = 4.0
+DEFAULT_QMETHOD = 'SUBTRACTIVE_DITHER_1'
+DEFAULT_HCOMP_SCALE = 0.0
+
+
+def read(filename, ext=None, extver=None, columns=None, rows=None,
+ header=False, case_sensitive=False, upper=False, lower=False,
+ vstorage='fixed', verbose=False, trim_strings=False, **keys):
+ """
+ Convenience function to read data from the specified FITS HDU
+
+ By default, all data are read. For tables, send columns= and rows= to
+ select subsets of the data. Table data are read into a recarray; use a
+ FITS object and read_column() to get a single column as an ordinary array.
+ For images, create a FITS object and use slice notation to read subsets.
+
+ Under the hood, a FITS object is constructed and data are read using
+ an associated FITSHDU object.
+
+ parameters
+ ----------
+ filename: string
+ A filename.
+ ext: number or string, optional
+ The extension. Either the numerical extension from zero
+ or a string extension name. If not sent, data is read from
+ the first HDU that has data.
+ extver: integer, optional
+ FITS allows multiple extensions to have the same name (extname). These
+ extensions can optionally specify an EXTVER version number in the
+ header. Send extver= to select a particular version. If extver is not
+ sent, the first one will be selected. If ext is an integer, the extver
+ is ignored.
+ columns: list or array, optional
+ An optional set of columns to read from table HDUs. Default is to
+ read all. Can be string or number.
+ rows: optional
+ An optional list of rows to read from table HDUS. Default is to
+ read all.
+ header: bool, optional
+ If True, read the FITS header and return a tuple (data,header)
+ Default is False.
+ case_sensitive: bool, optional
+ Match column names and extension names with case-sensitivity. Default
+ is False.
+ lower: bool, optional
+ If True, force all columns names to lower case in output. Default is
+ False.
+ upper: bool, optional
+ If True, force all columns names to upper case in output. Default is
+ False.
+ vstorage: string, optional
+ Set the default method to store variable length columns. Can be
+ 'fixed' or 'object'. See docs on fitsio.FITS for details. Default is
+ 'fixed'.
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Will over-ride the
+ trim_strings= keyword from constructor.
+ verbose: bool, optional
+ If True, print more info when doing various FITS operations.
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ kwargs = {
+ 'lower': lower,
+ 'upper': upper,
+ 'vstorage': vstorage,
+ 'case_sensitive': case_sensitive,
+ 'verbose': verbose,
+ 'trim_strings': trim_strings
+ }
+
+ read_kwargs = {}
+ if columns is not None:
+ read_kwargs['columns'] = columns
+ if rows is not None:
+ read_kwargs['rows'] = rows
+
+ with FITS(filename, **kwargs) as fits:
+
+ if ext is None:
+ for i in xrange(len(fits)):
+ if fits[i].has_data():
+ ext = i
+ break
+ if ext is None:
+ raise IOError("No extensions have data")
+
+ item = _make_item(ext, extver=extver)
+
+ data = fits[item].read(**read_kwargs)
+ if header:
+ h = fits[item].read_header()
+ return data, h
+ else:
+ return data
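+
+# A minimal usage sketch for read(); the file name, extension name and
+# column names are hypothetical:
+#
+#     data = read('sn.fits', ext='CATALOG', columns=['x', 'y'], rows=[0, 5])
+#     data, hdr = read('sn.fits', ext=1, header=True)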
+
+
+def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys):
+ """
+ Convenience function to read the header from the specified FITS HDU
+
+ The FITSHDR allows access to the values and comments by name and
+ number.
+
+ parameters
+ ----------
+ filename: string
+ A filename.
+ ext: number or string, optional
+ The extension. Either the numerical extension from zero
+ or a string extension name. Default read primary header.
+ extver: integer, optional
+ FITS allows multiple extensions to have the same name (extname). These
+ extensions can optionally specify an EXTVER version number in the
+ header. Send extver= to select a particular version. If extver is not
+ sent, the first one will be selected. If ext is an integer, the extver
+ is ignored.
+ case_sensitive: bool, optional
+ Match extension names with case-sensitivity. Default is False.
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ filename = extract_filename(filename)
+
+ dont_create = 0
+ try:
+ hdunum = ext+1
+ except TypeError:
+ hdunum = None
+
+ _fits = _fitsio_wrap.FITS(filename, READONLY, dont_create)
+
+ if hdunum is None:
+ extname = mks(ext)
+ if extver is None:
+ extver_num = 0
+ else:
+ extver_num = extver
+
+ if not case_sensitive:
+ # the builtin movnam_hdu is not case sensitive
+ hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num)
+ else:
+ # for case sensitivity we'll need to run through
+ # all the hdus
+ found = False
+ current_ext = 0
+ while True:
+ hdunum = current_ext+1
+ try:
+ hdu_type = _fits.movabs_hdu(hdunum) # noqa - not used
+ name, vers = _fits.get_hdu_name_version(hdunum)
+ if name == extname:
+ if extver is None:
+ # take the first match
+ found = True
+ break
+ else:
+ if extver_num == vers:
+ found = True
+ break
+ except OSError:
+ break
+
+ current_ext += 1
+
+ if not found:
+ raise IOError(
+ 'hdu not found: %s (extver %s)' % (extname, extver))
+
+ return FITSHDR(_fits.read_header(hdunum))
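+
+# A minimal usage sketch for read_header(); the file name and keyword
+# lookup are hypothetical:
+#
+#     hdr = read_header('sn.fits', ext=1)
+#     naxis = hdr['NAXIS']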
+
+
+def read_scamp_head(fname, header=None):
+ """
+ read a SCAMP .head file as a fits header FITSHDR object
+
+ parameters
+ ----------
+ fname: string
+ The path to the SCAMP .head file
+
+ header: FITSHDR, optional
+ Optionally combine the header with the input one. The input can
+        be any object convertible to a FITSHDR object
+
+ returns
+ -------
+ header: FITSHDR
+ A fits header object of type FITSHDR
+ """
+
+ with open(fname) as fobj:
+ lines = fobj.readlines()
+
+ lines = [line.strip() for line in lines if line[0:3] != 'END']
+
+ # if header is None an empty FITSHDR is created
+ hdr = FITSHDR(header)
+
+ for line in lines:
+ hdr.add_record(line)
+
+ return hdr
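+
+# A minimal usage sketch for read_scamp_head(); the file name is
+# hypothetical:
+#
+#     hdr = read_scamp_head('exposure.head')
+#     names = [r['name'] for r in hdr.records()]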
+
+
+def _make_item(ext, extver=None):
+ if extver is not None:
+ item = (ext, extver)
+ else:
+ item = ext
+
+ return item
+
+
+def write(filename, data, extname=None, extver=None, header=None,
+ clobber=False, ignore_empty=False, units=None, table_type='binary',
+ names=None, write_bitcols=False, compress=None, tile_dims=None,
+ qlevel=DEFAULT_QLEVEL,
+ qmethod=DEFAULT_QMETHOD,
+ dither_seed=None,
+ hcomp_scale=DEFAULT_HCOMP_SCALE,
+ hcomp_smooth=False,
+ **keys):
+ """
+ Convenience function to create a new HDU and write the data.
+
+ Under the hood, a FITS object is constructed. If you want to append rows
+ to an existing HDU, or modify data in an HDU, please construct a FITS
+ object.
+
+ parameters
+ ----------
+ filename: string
+ A filename.
+ data: numpy.ndarray or recarray
+        Either a normal n-dimensional array or a recarray. Images are written
+        to a new IMAGE_HDU and recarrays are written to BINARY_TBL or
+        ASCII_TBL HDUs.
+ extname: string, optional
+ An optional name for the new header unit.
+ extver: integer, optional
+ FITS allows multiple extensions to have the same name (extname).
+ These extensions can optionally specify an EXTVER version number in
+ the header. Send extver= to set a particular version, which will
+ be represented in the header with keyname EXTVER. The extver must
+ be an integer > 0. If extver is not sent, the first one will be
+ selected. If ext is an integer, the extver is ignored.
+ header: FITSHDR, list, dict, optional
+ A set of header keys to write. The keys are written before the data
+ is written to the table, preventing a resizing of the table area.
+
+ Can be one of these:
+ - FITSHDR object
+ - list of dictionaries containing 'name','value' and optionally
+ a 'comment' field; the order is preserved.
+ - a dictionary of keyword-value pairs; no comments are written
+ in this case, and the order is arbitrary.
+        Note required keywords such as NAXIS, XTENSION, etc. are cleared out.
+ clobber: bool, optional
+ If True, overwrite any existing file. Default is to append
+ a new extension on existing files.
+ ignore_empty: bool, optional
+ Default False. Unless set to True, only allow
+ empty HDUs in the zero extension.
+
+ table-only keywords
+ -------------------
+ units: list
+ A list of strings representing units for each column.
+ table_type: string, optional
+ Either 'binary' or 'ascii', default 'binary'
+ Matching is case-insensitive
+ write_bitcols: bool, optional
+ Write boolean arrays in the FITS bitcols format, default False
+ names: list, optional
+ If data is a list of arrays, you must send `names` as a list
+ of names or column numbers.
+
+ image-only keywords
+ -------------------
+ compress: string, optional
+ A string representing the compression algorithm for images,
+ default None.
+ Can be one of
+ 'RICE'
+ 'GZIP'
+ 'GZIP_2'
+ 'PLIO' (no unsigned or negative integers)
+ 'HCOMPRESS'
+ (case-insensitive) See the cfitsio manual for details.
+ tile_dims: tuple of ints, optional
+ The size of the tiles used to compress images.
+ qlevel: float, optional
+        Quantization level for floating point data. Lower values generally
+        result in more compression; we recommend reading the FITS standard or
+        cfitsio manual to fully understand the effects of quantization. None
+        or 0 means no quantization, and for gzip also implies lossless.
+        Default is 4.0, which follows the fpack defaults.
+ qmethod: string or int
+ The quantization method as string or integer.
+ 'NO_DITHER' or fitsio.NO_DITHER (-1)
+ No dithering is performed
+ 'SUBTRACTIVE_DITHER_1' or fitsio.SUBTRACTIVE_DITHER_1 (1)
+ Standard dithering
+ 'SUBTRACTIVE_DITHER_2' or fitsio.SUBTRACTIVE_DITHER_2 (2)
+ Preserves zeros
+
+ Defaults to 'SUBTRACTIVE_DITHER_1' which follows the fpack defaults
+
+ dither_seed: int or None
+ Seed for the subtractive dither. Seeding makes the lossy compression
+ reproducible. Allowed values are
+ None or 0 or 'clock':
+ do not set the seed explicitly, use the system clock
+ negative or 'checksum':
+ Set the seed based on the data checksum
+ 1-10_000:
+ use the input seed
+
+ hcomp_scale: float
+ Scale value for HCOMPRESS, 0.0 means lossless compression. Default is
+ 0.0 following the fpack defaults.
+ hcomp_smooth: bool
+ If True, apply smoothing when decompressing. Default False
+ """
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ kwargs = {
+ 'clobber': clobber,
+ 'ignore_empty': ignore_empty
+ }
+ with FITS(filename, 'rw', **kwargs) as fits:
+ fits.write(
+ data,
+ table_type=table_type,
+ units=units,
+ extname=extname,
+ extver=extver,
+ header=header,
+ names=names,
+ write_bitcols=write_bitcols,
+
+ compress=compress,
+ tile_dims=tile_dims,
+ qlevel=qlevel,
+ qmethod=qmethod,
+ dither_seed=dither_seed,
+ hcomp_scale=hcomp_scale,
+ hcomp_smooth=hcomp_smooth,
+ )
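+
+# A minimal usage sketch for write(); the file name is hypothetical. RICE
+# compression quantizes floating point data unless qlevel is None or 0:
+#
+#     import numpy as np
+#     img = np.random.normal(size=(100, 200)).astype('f4')
+#     write('img.fits', img, compress='rice', qlevel=4.0, clobber=True)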
+
+
+class FITS(object):
+ """
+ A class to read and write FITS images and tables.
+
+ This class uses the cfitsio library for almost all relevant work.
+
+ parameters
+ ----------
+ filename: string
+ The filename to open.
+ mode: int/string, optional
+ The mode, either a string or integer.
+ For reading only
+ 'r' or 0
+ For reading and writing
+ 'rw' or 1
+ You can also use fitsio.READONLY and fitsio.READWRITE.
+
+ Default is 'r'
+ clobber: bool, optional
+ If the mode is READWRITE, and clobber=True, then remove any existing
+ file before opening.
+ case_sensitive: bool, optional
+ Match column names and extension names with case-sensitivity. Default
+ is False.
+ lower: bool, optional
+ If True, force all columns names to lower case in output
+ upper: bool, optional
+ If True, force all columns names to upper case in output
+ vstorage: string, optional
+ A string describing how, by default, to store variable length columns
+ in the output array. This can be over-ridden when reading by using the
+ using vstorage keyword to the individual read methods. The options are
+
+ 'fixed': Use a fixed length field in the array, with
+ dimensions equal to the max possible size for column.
+ Arrays are padded with zeros.
+ 'object': Use an object for the field in the array.
+ Each element will then be an array of the right type,
+ but only using the memory needed to hold that element.
+
+ Default is 'fixed'. The rationale is that this is the option
+ of 'least surprise'
+ iter_row_buffer: integer
+ Number of rows to buffer when iterating over table HDUs.
+ Default is 1.
+ ignore_empty: bool, optional
+ Default False. Unless set to True, only allow
+ empty HDUs in the zero extension.
+ verbose: bool, optional
+ If True, print more info when doing various FITS operations.
+
+ See the docs at https://github.com/esheldon/fitsio
+ """
+ def __init__(self, filename, mode='r', lower=False, upper=False,
+ trim_strings=False, vstorage='fixed', case_sensitive=False,
+ iter_row_buffer=1, write_bitcols=False, ignore_empty=False,
+ verbose=False, clobber=False, **keys):
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ self.lower = lower
+ self.upper = upper
+ self.trim_strings = trim_strings
+ self.vstorage = vstorage
+ self.case_sensitive = case_sensitive
+ self.iter_row_buffer = iter_row_buffer
+ self.write_bitcols = write_bitcols
+ filename = extract_filename(filename)
+ self._filename = filename
+
+ self.mode = mode
+ self.ignore_empty = ignore_empty
+
+ self.verbose = verbose
+
+ if self.mode not in _int_modemap:
+ raise IOError("mode should be one of 'r', 'rw', "
+ "READONLY,READWRITE")
+
+ self.charmode = _char_modemap[self.mode]
+ self.intmode = _int_modemap[self.mode]
+
+ # Will not test existence when reading, let cfitsio
+ # do the test and report an error. This allows opening
+ # urls etc.
+ create = 0
+ if self.mode in [READWRITE, 'rw']:
+ if clobber:
+ create = 1
+ if filename[0] != '!':
+ filename = '!' + filename
+ else:
+ if os.path.exists(filename):
+ create = 0
+ else:
+ create = 1
+
+ self._did_create = (create == 1)
+ self._FITS = _fitsio_wrap.FITS(filename, self.intmode, create)
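+
+    # A minimal usage sketch (hypothetical file name and array); the
+    # context manager form guarantees the file is closed:
+    #
+    #     with FITS('data.fits', 'rw', clobber=True) as fits:
+    #         fits.write(arr)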
+
+ def close(self):
+ """
+ Close the fits file and set relevant metadata to None
+ """
+ if hasattr(self, '_FITS'):
+ if self._FITS is not None:
+ self._FITS.close()
+ self._FITS = None
+ self._filename = None
+ self.mode = None
+ self.charmode = None
+ self.intmode = None
+ self.hdu_list = None
+ self.hdu_map = None
+
+ def movabs_ext(self, ext):
+ """
+ Move to the indicated zero-offset extension.
+
+ In general, it is not necessary to use this method explicitly.
+ """
+ return self.movabs_hdu(ext+1)
+
+ def movabs_hdu(self, hdunum):
+ """
+ Move to the indicated one-offset hdu number.
+
+ In general, it is not necessary to use this method explicitly.
+ """
+
+ format_err = False
+
+ try:
+ hdu_type = self._FITS.movabs_hdu(hdunum)
+ except IOError as err:
+ # to support python 2 we can't use exception chaining.
+ # do this to avoid "During handling of the above exception, another
+ # exception occurred:"
+ serr = str(err)
+ if 'first keyword not XTENSION' in serr:
+ format_err = True
+ else:
+ raise
+
+ if format_err:
+ raise FITSFormatError(serr)
+
+ return hdu_type
+
+ def movnam_ext(self, extname, hdutype=ANY_HDU, extver=0):
+ """
+ Move to the indicated extension by name
+
+ In general, it is not necessary to use this method explicitly.
+
+ returns the zero-offset extension number
+ """
+ extname = mks(extname)
+ hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
+ return hdu-1
+
+ def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0):
+ """
+ Move to the indicated HDU by name
+
+ In general, it is not necessary to use this method explicitly.
+
+ returns the one-offset extension number
+ """
+ format_err = False
+
+ extname = mks(extname)
+ try:
+ hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
+ except IOError as err:
+ # to support python 2 we can't use exception chaining.
+ # do this to avoid "During handling of the above exception, another
+ # exception occurred:"
+ serr = str(err)
+ if 'first keyword not XTENSION' in serr:
+ format_err = True
+ else:
+ raise
+
+ if format_err:
+ raise FITSFormatError(serr)
+
+ return hdu
+
+ def reopen(self):
+ """
+ close and reopen the fits file with the same mode
+ """
+ self._FITS.close()
+ del self._FITS
+ self._FITS = _fitsio_wrap.FITS(self._filename, self.intmode, 0)
+ self.update_hdu_list()
+
+ def write(self, data, units=None, extname=None, extver=None,
+ compress=None,
+ tile_dims=None,
+ qlevel=DEFAULT_QLEVEL,
+ qmethod=DEFAULT_QMETHOD,
+ dither_seed=None,
+ hcomp_scale=DEFAULT_HCOMP_SCALE,
+ hcomp_smooth=False,
+ header=None, names=None,
+ table_type='binary', write_bitcols=False, **keys):
+ """
+ Write the data to a new HDU.
+
+ This method is a wrapper. If this is an IMAGE_HDU, write_image is
+ called, otherwise write_table is called.
+
+ parameters
+ ----------
+ data: ndarray
+ An n-dimensional image or an array with fields.
+ extname: string, optional
+ An optional extension name.
+ extver: integer, optional
+ FITS allows multiple extensions to have the same name (extname).
+ These extensions can optionally specify an EXTVER version number in
+ the header. Send extver= to set a particular version, which will
+ be represented in the header with keyname EXTVER. The extver must
+ be an integer > 0. If extver is not sent, the first one will be
+ selected. If ext is an integer, the extver is ignored.
+ header: FITSHDR, list, dict, optional
+ A set of header keys to write. Can be one of these:
+ - FITSHDR object
+ - list of dictionaries containing 'name','value' and optionally
+ a 'comment' field; the order is preserved.
+ - a dictionary of keyword-value pairs; no comments are written
+ in this case, and the order is arbitrary.
+            Note required keywords such as NAXIS, XTENSION, etc. are cleared out.
+
+ image-only keywords
+ -------------------
+ compress: string, optional
+ A string representing the compression algorithm for images,
+ default None.
+ Can be one of
+ 'RICE'
+ 'GZIP'
+ 'GZIP_2'
+ 'PLIO' (no unsigned or negative integers)
+ 'HCOMPRESS'
+ (case-insensitive) See the cfitsio manual for details.
+ tile_dims: tuple of ints, optional
+ The size of the tiles used to compress images.
+ qlevel: float, optional
+            Quantization level for floating point data. Lower values
+            generally result in more compression; we recommend reading the
+            FITS standard or cfitsio manual to fully understand the effects
+            of quantization. None or 0 means no quantization, and for gzip
+            also implies lossless. Default is 4.0, which follows the fpack
+            defaults.
+ qmethod: string or int
+ The quantization method as string or integer.
+ 'NO_DITHER' or fitsio.NO_DITHER (-1)
+ No dithering is performed
+ 'SUBTRACTIVE_DITHER_1' or fitsio.SUBTRACTIVE_DITHER_1 (1)
+ Standard dithering
+ 'SUBTRACTIVE_DITHER_2' or fitsio.SUBTRACTIVE_DITHER_2 (2)
+ Preserves zeros
+
+ Defaults to 'SUBTRACTIVE_DITHER_1' which follows the fpack defaults
+ dither_seed: int or None
+ Seed for the subtractive dither. Seeding makes the lossy
+ compression reproducible. Allowed values are
+ None or 0 or 'clock':
+ do not set the seed explicitly, use the system clock
+ negative or 'checksum':
+ Set the seed based on the data checksum
+ 1-10_000:
+ use the input seed
+
+ hcomp_scale: float
+ Scale value for HCOMPRESS, 0.0 means lossless compression. Default
+ is 0.0 following the fpack defaults.
+ hcomp_smooth: bool
+ If True, apply smoothing when decompressing. Default False
+
+ table-only keywords
+ -------------------
+        units: list, optional
+ A list of strings with units for each column.
+ table_type: string, optional
+ Either 'binary' or 'ascii', default 'binary'
+ Matching is case-insensitive
+ write_bitcols: bool, optional
+ Write boolean arrays in the FITS bitcols format, default False
+ names: list, optional
+ If data is a list of arrays, you must send `names` as a list
+ of names or column numbers.
+
+ restrictions
+ ------------
+ The File must be opened READWRITE
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ isimage = False
+ if data is None:
+ isimage = True
+ elif isinstance(data, numpy.ndarray):
+            if data.dtype.fields is None:
+ isimage = True
+
+ if isimage:
+ self.write_image(data, extname=extname, extver=extver,
+ compress=compress,
+ tile_dims=tile_dims,
+ qlevel=qlevel,
+ qmethod=qmethod,
+ dither_seed=dither_seed,
+ hcomp_scale=hcomp_scale,
+ hcomp_smooth=hcomp_smooth,
+ header=header)
+ else:
+ self.write_table(data, units=units,
+ extname=extname, extver=extver, header=header,
+ names=names,
+ table_type=table_type,
+ write_bitcols=write_bitcols)
+
+ def write_image(self, img, extname=None, extver=None,
+ compress=None, tile_dims=None,
+ qlevel=DEFAULT_QLEVEL,
+ qmethod=DEFAULT_QMETHOD,
+ dither_seed=None,
+ hcomp_scale=DEFAULT_HCOMP_SCALE,
+ hcomp_smooth=False,
+ header=None):
+ """
+ Create a new image extension and write the data.
+
+ parameters
+ ----------
+ img: ndarray
+ An n-dimensional image.
+ extname: string, optional
+ An optional extension name.
+ extver: integer, optional
+ FITS allows multiple extensions to have the same name (extname).
+ These extensions can optionally specify an EXTVER version number in
+ the header. Send extver= to set a particular version, which will
+ be represented in the header with keyname EXTVER. The extver must
+ be an integer > 0. If extver is not sent, the first one will be
+ selected. If ext is an integer, the extver is ignored.
+ compress: string, optional
+ A string representing the compression algorithm for images,
+ default None.
+ Can be one of
+ 'RICE'
+ 'GZIP'
+ 'GZIP_2'
+ 'PLIO' (no unsigned or negative integers)
+ 'HCOMPRESS'
+ (case-insensitive) See the cfitsio manual for details.
+ tile_dims: tuple of ints, optional
+ The size of the tiles used to compress images.
+ qlevel: float, optional
+            Quantization level for floating point data. Lower values
+            generally result in more compression; we recommend reading the
+            FITS standard or cfitsio manual to fully understand the effects
+            of quantization. None or 0 means no quantization, and for gzip
+            also implies lossless. Default is 4.0, which follows the fpack
+            defaults.
+ qmethod: string or int
+ The quantization method as string or integer.
+ 'NO_DITHER' or fitsio.NO_DITHER (-1)
+ No dithering is performed
+ 'SUBTRACTIVE_DITHER_1' or fitsio.SUBTRACTIVE_DITHER_1 (1)
+ Standard dithering
+ 'SUBTRACTIVE_DITHER_2' or fitsio.SUBTRACTIVE_DITHER_2 (2)
+ Preserves zeros
+
+ Defaults to 'SUBTRACTIVE_DITHER_1' which follows the fpack defaults
+ dither_seed: int or None
+ Seed for the subtractive dither. Seeding makes the lossy
+ compression reproducible. Allowed values are
+ None or 0 or 'clock':
+ do not set the seed explicitly, use the system clock
+ negative or 'checksum':
+ Set the seed based on the data checksum
+ 1-10_000:
+ use the input seed
+
+ hcomp_scale: float
+ Scale value for HCOMPRESS, 0.0 means lossless compression. Default
+ is 0.0 following the fpack defaults.
+ hcomp_smooth: bool
+ If True, apply smoothing when decompressing. Default False
+
+ header: FITSHDR, list, dict, optional
+ A set of header keys to write. Can be one of these:
+ - FITSHDR object
+ - list of dictionaries containing 'name','value' and optionally
+ a 'comment' field; the order is preserved.
+ - a dictionary of keyword-value pairs; no comments are written
+ in this case, and the order is arbitrary.
+            Note required keywords such as NAXIS, XTENSION, etc. are cleared out.
+
+
+ restrictions
+ ------------
+ The File must be opened READWRITE
+ """
+
+ self.create_image_hdu(
+ img,
+ header=header,
+ extname=extname,
+ extver=extver,
+ compress=compress,
+ tile_dims=tile_dims,
+ qlevel=qlevel,
+ qmethod=qmethod,
+ dither_seed=dither_seed,
+ hcomp_scale=hcomp_scale,
+ hcomp_smooth=hcomp_smooth,
+ )
+
+ if header is not None:
+ self[-1].write_keys(header)
+ self[-1]._update_info()
+
+
+ def create_image_hdu(self,
+ img=None,
+ dims=None,
+ dtype=None,
+ extname=None,
+ extver=None,
+ compress=None,
+ tile_dims=None,
+ qlevel=DEFAULT_QLEVEL,
+ qmethod=DEFAULT_QMETHOD,
+ dither_seed=None,
+ hcomp_scale=DEFAULT_HCOMP_SCALE,
+ hcomp_smooth=False,
+ header=None):
+ """
+ Create a new, empty image HDU and reload the hdu list. Either
+ create from an input image or from input dims and dtype
+
+ fits.create_image_hdu(image, ...)
+ fits.create_image_hdu(dims=dims, dtype=dtype)
+
+ If an image is sent, the data are also written.
+
+ You can write data into the new extension using
+ fits[extension].write(image)
+
+ Alternatively you can skip calling this function and instead just use
+
+ fits.write(image)
+ or
+ fits.write_image(image)
+
+ which will create the new image extension for you with the appropriate
+ structure, and write the data.
+
+ parameters
+ ----------
+ img: ndarray, optional
+ An image with which to determine the properties of the HDU. The
+ data will be written.
+ dims: sequence, optional
+ A sequence describing the dimensions of the image to be created
+ on disk. You must also send a dtype=
+ dtype: numpy data type
+ When sending dims= also send the data type. Can be of the
+ various numpy data type declaration styles, e.g. 'f8',
+ numpy.float64.
+ extname: string, optional
+ An optional extension name.
+ extver: integer, optional
+ FITS allows multiple extensions to have the same name (extname).
+ These extensions can optionally specify an EXTVER version number in
+ the header. Send extver= to set a particular version, which will
+ be represented in the header with keyname EXTVER. The extver must
+ be an integer > 0. If extver is not sent, the first one will be
+ selected. If ext is an integer, the extver is ignored.
+ compress: string, optional
+ A string representing the compression algorithm for images,
+ default None.
+ Can be one of
+ 'RICE'
+ 'GZIP'
+ 'GZIP_2'
+ 'PLIO' (no unsigned or negative integers)
+ 'HCOMPRESS'
+ (case-insensitive) See the cfitsio manual for details.
+ tile_dims: tuple of ints, optional
+ The size of the tiles used to compress images.
+ qlevel: float, optional
+            Quantization level for floating point data. Lower values
+            generally result in more compression; we recommend reading the
+            FITS standard or cfitsio manual to fully understand the effects
+            of quantization. None or 0 means no quantization, and for gzip
+            also implies lossless. Default is 4.0, which follows the fpack
+            defaults.
+ qmethod: string or int
+ The quantization method as string or integer.
+ 'NO_DITHER' or fitsio.NO_DITHER (-1)
+ No dithering is performed
+ 'SUBTRACTIVE_DITHER_1' or fitsio.SUBTRACTIVE_DITHER_1 (1)
+ Standard dithering
+ 'SUBTRACTIVE_DITHER_2' or fitsio.SUBTRACTIVE_DITHER_2 (2)
+ Preserves zeros
+
+ Defaults to 'SUBTRACTIVE_DITHER_1' which follows the fpack defaults
+ dither_seed: int or None
+ Seed for the subtractive dither. Seeding makes the lossy
+ compression reproducible. Allowed values are
+ None or 0 or 'clock':
+ do not set the seed explicitly, use the system clock
+ negative or 'checksum':
+ Set the seed based on the data checksum
+ 1-10_000:
+ use the input seed
+ hcomp_scale: float
+ Scale value for HCOMPRESS, 0.0 means lossless compression. Default
+ is 0.0 following the fpack defaults.
+ hcomp_smooth: bool
+ If True, apply smoothing when decompressing. Default False
+
+ header: FITSHDR, list, dict, optional
+ This is only used to determine how many slots to reserve for
+ header keywords
+
+ restrictions
+ ------------
+ The File must be opened READWRITE
+ """
+
+        # build from the image (possibly None) unless only dims= was sent
+        if img is not None or dims is None:
+            from_image = True
+        else:
+            from_image = False
+
+ if from_image:
+ img2send = img
+ if img is not None:
+ dims = img.shape
+ dtstr = img.dtype.descr[0][1][1:]
+ if img.size == 0:
+ raise ValueError("data must have at least 1 row")
+
+ # data must be c-contiguous and native byte order
+ if not img.flags['C_CONTIGUOUS']:
+ # this always makes a copy
+ img2send = numpy.ascontiguousarray(img)
+ array_to_native(img2send, inplace=True)
+ else:
+ img2send = array_to_native(img, inplace=False)
+
+ if IS_PY3 and img2send.dtype.char == 'U':
+ # for python3, we convert unicode to ascii
+ # this will error if the character is not in ascii
+ img2send = img2send.astype('S', copy=copy_if_needed)
+
+ else:
+ self._ensure_empty_image_ok()
+ compress = None
+ tile_dims = None
+
+ # we get dims from the input image
+ dims2send = None
+ else:
+ # img was None and dims was sent
+ if dtype is None:
+ raise ValueError("send dtype= with dims=")
+
+ # this must work!
+ dtype = numpy.dtype(dtype)
+ dtstr = dtype.descr[0][1][1:]
+ # use the example image to build the type in C
+ img2send = numpy.zeros(1, dtype=dtype)
+
+ # sending an array simplifies access
+ dims2send = numpy.array(dims, dtype='i8', ndmin=1)
+
+ if img2send is not None:
+ if img2send.dtype.fields is not None:
+ raise ValueError(
+ "got record data type, expected regular ndarray")
+
+ if extname is None:
+ # will be ignored
+ extname = ""
+ else:
+ if not isstring(extname):
+ raise ValueError("extension name must be a string")
+ extname = mks(extname)
+
+ if extname is not None and extver is not None:
+ extver = check_extver(extver)
+
+ if extver is None:
+ # will be ignored
+ extver = 0
+
+ comptype = get_compress_type(compress)
+ qmethod = get_qmethod(qmethod)
+ dither_seed = get_dither_seed(dither_seed)
+
+ tile_dims = get_tile_dims(tile_dims, dims)
+ if qlevel is None:
+ # 0.0 is the sentinel value for "no quantization" in cfitsio
+ qlevel = 0.0
+ else:
+ qlevel = float(qlevel)
+
+ if img2send is not None:
+ check_comptype_img(comptype, dtstr)
+
+ if header is not None:
+ nkeys = len(header)
+ else:
+ nkeys = 0
+
+ if hcomp_smooth:
+ hcomp_smooth = 1
+ else:
+ hcomp_smooth = 0
+
+ self._FITS.create_image_hdu(
+ img2send,
+ nkeys,
+ dims=dims2send,
+ comptype=comptype,
+ tile_dims=tile_dims,
+
+ qlevel=qlevel,
+ qmethod=qmethod,
+ dither_seed=dither_seed,
+
+ hcomp_scale=hcomp_scale,
+ hcomp_smooth=hcomp_smooth,
+
+ extname=extname,
+ extver=extver,
+ )
+
+ if compress is not None and (qlevel is None or qlevel == 0.0):
+ # work around bug in cfitsio
+ self.reopen()
+ else:
+ # don't rebuild the whole list unless this is the first hdu
+ # to be created
+ self.update_hdu_list(rebuild=False)
+
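+ # Usage sketch for the compression keywords documented above
+ # (illustrative only; assumes `fits` is a FITS object opened 'rw'):
+ #
+ # img = numpy.random.normal(size=(256, 256)).astype('f4')
+ # # RICE compression; qlevel=0.0 would make the compression lossless
+ # fits.create_image_hdu(img, compress='RICE', qlevel=4.0,
+ # qmethod='SUBTRACTIVE_DITHER_2')
+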
+ def _ensure_empty_image_ok(self):
+ """
+ If ignore_empty was not set to True, we only allow empty HDU for first
+ HDU and if there is no data there already
+ """
+ if self.ignore_empty:
+ return
+
+ if len(self) > 1:
+ raise RuntimeError(
+ "Cannot write None image at extension %d" % len(self))
+ if 'ndims' in self[0]._info:
+ raise RuntimeError("Can only write None images to extension "
+ "zero, and it already contains data")
+
+ def write_table(self, data, table_type='binary',
+ names=None, formats=None, units=None,
+ extname=None, extver=None, header=None,
+ write_bitcols=False):
+ """
+ Create a new table extension and write the data.
+
+ The table definition is taken from the fields in the input array. If
+ you want to append new rows to the table, access the HDU directly and
+ use the write() function, e.g.
+
+ fits[extension].append(data)
+
+ parameters
+ ----------
+ data: recarray
+ A numpy array with fields. The table definition will be
+ determined from this array.
+ table_type: string, optional
+ Either 'binary' or 'ascii', default 'binary'
+ Matching is case-insensitive
+ extname: string, optional
+ An optional string for the extension name.
+ extver: integer, optional
+ FITS allows multiple extensions to have the same name (extname).
+ These extensions can optionally specify an EXTVER version number in
+ the header. Send extver= to set a particular version, which will
+ be represented in the header with keyname EXTVER. The extver must
+ be an integer > 0. If extver is not sent, the first one will be
+ selected. If ext is an integer, the extver is ignored.
+ units: list of strings, optional
+ A list of strings with units for each column.
+ header: FITSHDR, list, dict, optional
+ A set of header keys to write. The keys are written before the data
+ is written to the table, preventing a resizing of the table area.
+
+ Can be one of these:
+ - FITSHDR object
+ - list of dictionaries containing 'name','value' and optionally
+ a 'comment' field; the order is preserved.
+ - a dictionary of keyword-value pairs; no comments are written
+ in this case, and the order is arbitrary.
+ Note that required keywords such as NAXIS, XTENSION, etc. are cleaned out.
+ write_bitcols: boolean, optional
+ Write boolean arrays in the FITS bitcols format, default False
+
+ restrictions
+ ------------
+ The file must be opened READWRITE
+ """
+
+ """
+ if data.dtype.fields == None:
+ raise ValueError("data must have fields")
+ if data.size == 0:
+ raise ValueError("data must have at least 1 row")
+ """
+
+ self.create_table_hdu(data=data,
+ header=header,
+ names=names,
+ units=units,
+ extname=extname,
+ extver=extver,
+ table_type=table_type,
+ write_bitcols=write_bitcols)
+
+ if header is not None:
+ self[-1].write_keys(header)
+ self[-1]._update_info()
+
+ self[-1].write(data, names=names)
+
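+ # Minimal write_table sketch (illustrative; assumes `fits` is a FITS
+ # object opened READWRITE and numpy is imported as `numpy`):
+ #
+ # data = numpy.zeros(10, dtype=[('x', 'f8'), ('id', 'i4')])
+ # fits.write_table(data, extname='CAT')
+ # fits['CAT'].append(data) # append more rows later
+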
+ def read_raw(self):
+ """
+ Read the raw FITS file contents, returning them as a string (bytes in
+ python 3).
+ """
+ return self._FITS.read_raw()
+
+ def create_table_hdu(self, data=None, dtype=None,
+ header=None,
+ names=None, formats=None,
+ units=None, dims=None, extname=None, extver=None,
+ table_type='binary', write_bitcols=False):
+ """
+ Create a new, empty table extension and reload the hdu list.
+
+ There are three ways to do it:
+ 1) send a numpy dtype, from which the formats in the fits file will
+ be determined.
+ 2) send an array in the data= keyword. This is required if you have
+ object fields for writing to variable length columns.
+ 3) send the names, formats and dims yourself.
+
+ You can then write data into the new extension using
+ fits[extension].write(array)
+ If you want to write to a single column
+ fits[extension].write_column(array)
+ But be careful as the other columns will be left zeroed.
+
+ Often you will instead just use write_table to do this all
+ atomically.
+
+ fits.write_table(recarray)
+
+ write_table will create the new table extension for you with the
+ appropriate fields.
+
+ parameters
+ ----------
+ dtype: numpy dtype or descriptor, optional
+ If you have an array with fields, you can just send arr.dtype. You
+ can also use a list of tuples, e.g. [('x','f8'),('index','i4')] or
+ a dictionary representation.
+ data: a numpy array with fields or a dictionary, optional
+
+ An array or dict from which to determine the table definition. You
+ must use this instead of sending a descriptor if you have object
+ array fields, as this is the only way to determine the type and max
+ size.
+
+ names: list of strings, optional
+ The list of field names
+ formats: list of strings, optional
+ The TFORM format strings for each field.
+ dims: list of strings, optional
+ An optional list of dimension strings for each field. Should
+ match the repeat count for the formats fields. Be careful of
+ the order since FITS is more like Fortran. See the descr2tabledef
+ function.
+
+ table_type: string, optional
+ Either 'binary' or 'ascii', default 'binary'
+ Matching is case-insensitive
+ units: list of strings, optional
+ An optional list of unit strings for each field.
+ extname: string, optional
+ An optional extension name.
+ extver: integer, optional
+ FITS allows multiple extensions to have the same name (extname).
+ These extensions can optionally specify an EXTVER version number in
+ the header. Send extver= to set a particular version, which will
+ be represented in the header with keyname EXTVER. The extver must
+ be an integer > 0. If extver is not sent, the first one will be
+ selected. If ext is an integer, the extver is ignored.
+ write_bitcols: bool, optional
+ Write boolean arrays in the FITS bitcols format, default False
+
+ header: FITSHDR, list, dict, optional
+ This is only used to determine how many slots to reserve for
+ header keywords
+
+
+ restrictions
+ ------------
+ The file must be opened READWRITE
+ """
+
+ # record this for the TableHDU object
+ write_bitcols = self.write_bitcols or write_bitcols
+
+ # convert the table type to its integer code
+ table_type_int = _extract_table_type(table_type)
+
+ if data is not None:
+ if isinstance(data, numpy.ndarray):
+ names, formats, dims = array2tabledef(
+ data, table_type=table_type, write_bitcols=write_bitcols)
+ elif isinstance(data, (list, dict)):
+ names, formats, dims = collection2tabledef(
+ data, names=names, table_type=table_type,
+ write_bitcols=write_bitcols)
+ else:
+ raise ValueError(
+ "data must be an ndarray with fields or a dict")
+ elif dtype is not None:
+ dtype = numpy.dtype(dtype)
+ names, formats, dims = descr2tabledef(
+ dtype.descr,
+ write_bitcols=write_bitcols,
+ table_type=table_type,
+ )
+ else:
+ if names is None or formats is None:
+ raise ValueError(
+ "send either dtype=, data=, or names= and formats=")
+
+ if not isinstance(names, list) or not isinstance(formats, list):
+ raise ValueError("names and formats should be lists")
+ if len(names) != len(formats):
+ raise ValueError("names and formats must be same length")
+
+ if dims is not None:
+ if not isinstance(dims, list):
+ raise ValueError("dims should be a list")
+ if len(dims) != len(names):
+ raise ValueError("names and dims must be same length")
+
+ if units is not None:
+ if not isinstance(units, list):
+ raise ValueError("units should be a list")
+ if len(units) != len(names):
+ raise ValueError("names and units must be same length")
+
+ if extname is None:
+ # will be ignored
+ extname = ""
+ else:
+ if not isstring(extname):
+ raise ValueError("extension name must be a string")
+ extname = mks(extname)
+
+ if extname is not None and extver is not None:
+ extver = check_extver(extver)
+ if extver is None:
+ # will be ignored
+ extver = 0
+ if extname is None:
+ # will be ignored
+ extname = ""
+
+ if header is not None:
+ nkeys = len(header)
+ else:
+ nkeys = 0
+
+ # note we can create extname in the c code for tables, but not images
+ self._FITS.create_table_hdu(table_type_int, nkeys,
+ names, formats, tunit=units, tdim=dims,
+ extname=extname, extver=extver)
+
+ # don't rebuild the whole list unless this is the first hdu
+ # to be created
+ self.update_hdu_list(rebuild=False)
+
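+ # Sketch of the three creation modes described above (illustrative;
+ # `arr` is a hypothetical array with fields):
+ #
+ # # 1) from a numpy dtype
+ # fits.create_table_hdu(dtype=[('x', 'f8'), ('flux', 'f4', 3)])
+ # # 2) from example data, required for object (variable length) fields
+ # fits.create_table_hdu(data=arr, extname='CAT')
+ # # 3) from explicit names and TFORM formats
+ # fits.create_table_hdu(names=['x', 'flux'], formats=['1D', '3E'])
+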
+ def update_hdu_list(self, rebuild=True):
+ """
+ Force an update of the entire HDU list
+
+ Normally you don't need to call this method directly
+
+ If rebuild is True or the hdu_list is not yet set, the list is
+ rebuilt from scratch; otherwise only newly created HDUs are appended
+ """
+ if not hasattr(self, 'hdu_list'):
+ rebuild = True
+
+ if rebuild:
+ self.hdu_list = []
+ self.hdu_map = {}
+
+ # we don't know how many hdus there are, so iterate
+ # until we can't open any more
+ ext_start = 0
+ else:
+ # start from last
+ ext_start = len(self)
+
+ ext = ext_start
+ while True:
+ try:
+ self._append_hdu_info(ext)
+ except IOError:
+ break
+ except RuntimeError:
+ break
+
+ ext = ext + 1
+
+ def _append_hdu_info(self, ext):
+ """
+ internal routine
+
+ append info for the indicated extension
+ """
+
+ # raises IOError if not found
+ hdu_type = self._FITS.movabs_hdu(ext+1)
+
+ if hdu_type == IMAGE_HDU:
+ hdu = ImageHDU(self._FITS, ext)
+ elif hdu_type == BINARY_TBL:
+ hdu = TableHDU(
+ self._FITS, ext,
+ lower=self.lower, upper=self.upper,
+ trim_strings=self.trim_strings,
+ vstorage=self.vstorage, case_sensitive=self.case_sensitive,
+ iter_row_buffer=self.iter_row_buffer,
+ write_bitcols=self.write_bitcols)
+ elif hdu_type == ASCII_TBL:
+ hdu = AsciiTableHDU(
+ self._FITS, ext,
+ lower=self.lower, upper=self.upper,
+ trim_strings=self.trim_strings,
+ vstorage=self.vstorage, case_sensitive=self.case_sensitive,
+ iter_row_buffer=self.iter_row_buffer,
+ write_bitcols=self.write_bitcols)
+ else:
+ mess = ("extension %s is of unknown type %s; "
+ "this is probably a bug")
+ mess = mess % (ext, hdu_type)
+ raise IOError(mess)
+
+ self.hdu_list.append(hdu)
+ self.hdu_map[ext] = hdu
+
+ extname = hdu.get_extname()
+ if not self.case_sensitive:
+ extname = extname.lower()
+ if extname != '':
+ # this will guarantee we default to *first* version,
+ # if version is not requested, using __getitem__
+ if extname not in self.hdu_map:
+ self.hdu_map[extname] = hdu
+
+ ver = hdu.get_extver()
+ if ver > 0:
+ key = '%s-%s' % (extname, ver)
+ self.hdu_map[key] = hdu
+
+ def __iter__(self):
+ """
+ begin iteration over HDUs
+ """
+ if not hasattr(self, 'hdu_list'):
+ self.update_hdu_list()
+ self._iter_index = 0
+ return self
+
+ def next(self):
+ """
+ Move to the next iteration
+ """
+ if self._iter_index == len(self.hdu_list):
+ raise StopIteration
+ hdu = self.hdu_list[self._iter_index]
+ self._iter_index += 1
+ return hdu
+
+ __next__ = next
+
+ def __len__(self):
+ """
+ get the number of extensions
+ """
+ if not hasattr(self, 'hdu_list'):
+ self.update_hdu_list()
+ return len(self.hdu_list)
+
+ def _extract_item(self, item):
+ """
+ utility function to extract an "item", meaning an extension
+ number or name, plus an optional version.
+ """
+ ver = 0
+ if isinstance(item, tuple):
+ ver_sent = True
+ nitem = len(item)
+ if nitem == 1:
+ ext = item[0]
+ elif nitem == 2:
+ ext, ver = item
+ else:
+ ver_sent = False
+ ext = item
+ return ext, ver, ver_sent
+
+ def __getitem__(self, item):
+ """
+ Get an hdu by number, name, and possibly version
+ """
+ if not hasattr(self, 'hdu_list'):
+ if self._did_create:
+ # we created the file and haven't written anything yet
+ raise ValueError("Requested hdu '%s' not present" % item)
+
+ self.update_hdu_list()
+
+ if len(self) == 0:
+ raise ValueError("Requested hdu '%s' not present" % item)
+
+ ext, ver, ver_sent = self._extract_item(item)
+
+ try:
+ # if it is an int
+ hdu = self.hdu_list[ext]
+ except Exception:
+ # might be a string
+ ext = mks(ext)
+ if not self.case_sensitive:
+ mess = '(case insensitive)'
+ ext = ext.lower()
+ else:
+ mess = '(case sensitive)'
+
+ if ver > 0:
+ key = '%s-%s' % (ext, ver)
+ if key not in self.hdu_map:
+ raise IOError("extension not found: %s, "
+ "version %s %s" % (ext, ver, mess))
+ hdu = self.hdu_map[key]
+ else:
+ if ext not in self.hdu_map:
+ raise IOError("extension not found: %s %s" % (ext, mess))
+ hdu = self.hdu_map[ext]
+
+ return hdu
+
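+ # Lookup sketch: an HDU can be fetched by number, by name, or by a
+ # (name, version) tuple as parsed by _extract_item (illustrative):
+ #
+ # hdu = fits[0] # by extension number
+ # hdu = fits['sci'] # by EXTNAME, case-insensitive by default
+ # hdu = fits['sci', 2] # by EXTNAME and EXTVER
+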
+ def __contains__(self, item):
+ """
+ tell whether specified extension exists, possibly
+ with version sent as well
+ """
+ try:
+ hdu = self[item] # noqa
+ return True
+ except Exception:
+ return False
+
+ def __repr__(self):
+ """
+ Text representation of some fits file metadata
+ """
+ spacing = ' '*2
+ rep = ['']
+ rep.append("%sfile: %s" % (spacing, self._filename))
+ rep.append("%smode: %s" % (spacing, _modeprint_map[self.intmode]))
+
+ rep.append('%sextnum %-15s %s' % (spacing, "hdutype", "hduname[v]"))
+
+ if not hasattr(self, 'hdu_list'):
+ if not self._did_create:
+ # we expect some stuff
+ self.update_hdu_list()
+
+ for i, hdu in enumerate(self.hdu_list):
+ t = hdu._info['hdutype']
+ name = hdu.get_extname()
+ if name != '':
+ ver = hdu.get_extver()
+ if ver != 0:
+ name = '%s[%s]' % (name, ver)
+
+ rep.append(
+ "%s%-6d %-15s %s" % (
+ spacing, i, _hdu_type_map[t], name
+ )
+ )
+
+ rep = '\n'.join(rep)
+ return rep
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exception_type, exception_value, traceback):
+ self.close()
+
+
+def check_extver(extver):
+ if extver is None:
+ return 0
+ extver = int(extver)
+ if extver <= 0:
+ raise ValueError("extver must be > 0")
+ return extver
+
+
+def extract_filename(filename):
+ filename = mks(filename)
+ filename = filename.strip()
+ if filename[0] == "!":
+ filename = filename[1:]
+ filename = os.path.expandvars(filename)
+ filename = os.path.expanduser(filename)
+ return filename
+
+
+def array2tabledef(data, table_type='binary', write_bitcols=False):
+ """
+ Similar to descr2tabledef but if there are object columns a type
+ and max length will be extracted and used for the tabledef
+ """
+ is_ascii = (str(table_type).lower() == 'ascii')
+
+ if data.dtype.fields is None:
+ raise ValueError("data must have fields")
+ names = []
+ names_nocase = {}
+ formats = []
+ dims = []
+
+ descr = data.dtype.descr
+ for d in descr:
+ # these have the form '<f4' or '|S25', etc. Extract the pure type
+ npy_dtype = d[1][1:]
+ if is_ascii:
+ if npy_dtype in ['u1', 'i1']:
+ raise ValueError(
+ "1-byte integers are not supported for "
+ "ascii tables: '%s'" % npy_dtype)
+ if npy_dtype in ['u2']:
+ raise ValueError(
+ "unsigned 2-byte integers are not supported for "
+ "ascii tables: '%s'" % npy_dtype)
+
+ if npy_dtype[0] == 'O':
+ # this will be a variable length column 1Pt(len) where t is the
+ # type and len is max length. Each element must be convertible to
+ # the same type as the first
+ name = d[0]
+ form, dim = npy_obj2fits(data, name)
+ elif npy_dtype[0] == "V":
+ continue
+ else:
+ name, form, dim = _npy2fits(
+ d, table_type=table_type, write_bitcols=write_bitcols)
+
+ if name == '':
+ raise ValueError("field name is an empty string")
+
+ """
+ if is_ascii:
+ if dim is not None:
+ raise ValueError("array columns are not supported for "
+ "ascii tables")
+ """
+ name_nocase = name.upper()
+ if name_nocase in names_nocase:
+ raise ValueError(
+ "duplicate column name found: '%s'. Note "
+ "FITS column names are not case sensitive" % name_nocase)
+
+ names.append(name)
+ names_nocase[name_nocase] = name_nocase
+
+ formats.append(form)
+ dims.append(dim)
+
+ return names, formats, dims
+
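+# For example, a dtype like [('x', 'f8'), ('s', 'S5'), ('v', 'f4', 2)]
+# would yield names ['x', 's', 'v'] and TFORM strings along the lines of
+# ['1D', '5A', '2E']; the exact strings are produced by _npy2fits.
+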
+
+def collection2tabledef(
+ data, names=None, table_type='binary', write_bitcols=False):
+ if isinstance(data, dict):
+ if names is None:
+ names = list(data.keys())
+ isdict = True
+ elif isinstance(data, list):
+ if names is None:
+ raise ValueError("For a list of arrays, send names=")
+ isdict = False
+ else:
+ raise ValueError("expected a dict or a list of arrays")
+
+ is_ascii = (str(table_type).lower() == 'ascii')
+ formats = []
+ dims = []
+
+ for i, name in enumerate(names):
+
+ if isdict:
+ this_data = data[name]
+ else:
+ this_data = data[i]
+
+ dt = this_data.dtype.descr[0]
+ dname = dt[1][1:]
+
+ if is_ascii:
+ if dname in ['u1', 'i1']:
+ raise ValueError(
+ "1-byte integers are not supported for "
+ "ascii tables: '%s'" % dname)
+ if dname in ['u2']:
+ raise ValueError(
+ "unsigned 2-byte integers are not supported for "
+ "ascii tables: '%s'" % dname)
+
+ if dname[0] == 'O':
+ # this will be a variable length column 1Pt(len) where t is the
+ # type and len is max length. Each element must be convertible to
+ # the same type as the first
+ form, dim = npy_obj2fits(this_data)
+ else:
+ send_dt = dt
+ if len(this_data.shape) > 1:
+ send_dt = list(dt) + [this_data.shape[1:]]
+ _, form, dim = _npy2fits(
+ send_dt, table_type=table_type, write_bitcols=write_bitcols)
+
+ formats.append(form)
+ dims.append(dim)
+
+ return names, formats, dims
+
+
+def descr2tabledef(descr, table_type='binary', write_bitcols=False):
+ """
+ Create a FITS table def from the input numpy descriptor.
+
+ parameters
+ ----------
+ descr: list
+ A numpy recarray type descriptor array.dtype.descr
+
+ returns
+ -------
+ names, formats, dims: tuple of lists
+ These are the ttyp, tform and tdim header entries
+ for each field. dim entries may be None
+ """
+ names = []
+ formats = []
+ dims = []
+
+ for d in descr:
+
+ """
+ npy_dtype = d[1][1:]
+ if is_ascii and npy_dtype in ['u1','i1']:
+ raise ValueError("1-byte integers are not supported for "
+ "ascii tables")
+ """
+
+ if d[1][1] == 'O':
+ raise ValueError(
+ 'cannot automatically declare a var column without '
+ 'some data to determine max len')
+
+ name, form, dim = _npy2fits(
+ d, table_type=table_type, write_bitcols=write_bitcols)
+
+ if name == '':
+ raise ValueError("field name is an empty string")
+
+ """
+ if is_ascii:
+ if dim is not None:
+ raise ValueError("array columns are not supported "
+ "for ascii tables")
+ """
+
+ names.append(name)
+ formats.append(form)
+ dims.append(dim)
+
+ return names, formats, dims
+
+
+def npy_obj2fits(data, name=None):
+ # this will be a variable length column 1Pt(len) where t is the
+ # type and len is max length. Each element must be convertible to
+ # the same type as the first
+
+ if name is None:
+ d = data.dtype.descr
+ first = data[0]
+ else:
+ d = data[name].dtype.descr # noqa - not used
+ first = data[name][0]
+
+ # note numpy._string is an instance of str in python2, bytes
+ # in python3
+ if isinstance(first, str) or (IS_PY3 and isinstance(first, bytes)):
+ if IS_PY3:
+ if isinstance(first, str):
+ fits_dtype = _table_npy2fits_form['U']
+ else:
+ fits_dtype = _table_npy2fits_form['S']
+ else:
+ fits_dtype = _table_npy2fits_form['S']
+ else:
+ arr0 = numpy.array(first, copy=copy_if_needed)
+ dtype0 = arr0.dtype
+ npy_dtype = dtype0.descr[0][1][1:]
+ if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
+ raise ValueError("Field '%s' is an array of strings, this is "
+ "not allowed in variable length columns" % name)
+ if npy_dtype not in _table_npy2fits_form:
+ raise ValueError(
+ "Field '%s' has unsupported type '%s'" % (name, npy_dtype))
+ fits_dtype = _table_npy2fits_form[npy_dtype]
+
+ # Q uses 64-bit addressing, should try at some point but the cfitsio manual
+ # says it is experimental
+ # form = '1Q%s' % fits_dtype
+ form = '1P%s' % fits_dtype
+ dim = None
+
+ return form, dim
+
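+# For example, an object column whose first element is a float64 sequence
+# maps to the variable-length form '1PD', and a column of strings maps to
+# '1PA'; cfitsio determines the maximum length when the data are written.
+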
+
+def get_tile_dims(tile_dims, imshape):
+ """
+ Just make sure tile_dims has the appropriate number of dimensions
+ """
+
+ if tile_dims is None:
+ td = None
+ else:
+ td = numpy.array(tile_dims, dtype='i8')
+ nd = len(imshape)
+ if td.size != nd:
+ msg = "expected tile_dims to have %d dims, got %d" % (nd, td.size)
+ raise ValueError(msg)
+
+ return td
+
+
+def get_compress_type(compress):
+ if compress is not None:
+ compress = str(compress).upper()
+ if compress not in _compress_map:
+ raise ValueError(
+ "compress must be one of %s" % list(_compress_map.keys()))
+ return _compress_map[compress]
+
+
+def get_qmethod(qmethod):
+ if qmethod not in _qmethod_map:
+ if isinstance(qmethod, str):
+ qmethod = qmethod.upper()
+ elif isinstance(qmethod, bytes):
+ # in py27, bytes are str, so we can safely assume
+ # py3 here
+ qmethod = str(qmethod, 'ascii').upper()
+
+ if qmethod not in _qmethod_map:
+ raise ValueError(
+ "qmethod must be one of %s" % list(_qmethod_map.keys()))
+
+ return _qmethod_map[qmethod]
+
+
+def get_dither_seed(dither_seed):
+ """
+ Convert a seed value or indicator to the appropriate integer value for
+ cfitsio
+
+ Parameters
+ ----------
+ dither_seed: number or string
+ Seed for the subtractive dither. Seeding makes the lossy compression
+ reproducible. Allowed values are
+ None or 0 or 'clock':
+ Return 0, do not set the seed explicitly, use the system clock
+ negative or 'checksum':
+ Return -1, meaning set the seed based on the data checksum
+ 1-10_000:
+ use the input seed
+ """
+ if isinstance(dither_seed, bytes):
+ dither_seed = str(dither_seed, 'utf-8')
+
+ if isinstance(dither_seed, str):
+ dlow = dither_seed.lower()
+ if dlow == 'clock':
+ seed_out = 0
+ elif dlow == 'checksum':
+ seed_out = -1
+ else:
+ raise ValueError(f'Bad dither_seed {dither_seed}')
+ elif dither_seed is None:
+ seed_out = 0
+ else:
+ # must fit in an int
+ seed_out = numpy.int32(dither_seed)
+
+ if seed_out > 10_000:
+ raise ValueError(
+ f'Got dither_seed {seed_out}, expected a value <= 10_000'
+ )
+
+ return seed_out
+
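+# For example (illustrative):
+#
+# get_dither_seed('clock') # -> 0, seed from the system clock
+# get_dither_seed('checksum') # -> -1, seed from the data checksum
+# get_dither_seed(42) # -> 42, explicit seed, must be <= 10_000
+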
+
+def check_comptype_img(comptype, dtype_str):
+
+ if comptype == NOCOMPRESS:
+ return
+
+ # if dtype_str == 'i8':
+ # no i8 allowed for tile-compressed images
+ # raise ValueError("8-byte integers not supported when "
+ # "using tile compression")
+
+ if comptype == PLIO_1:
+ # no unsigned u4/u8 for plio
+ if dtype_str == 'u4' or dtype_str == 'u8':
+ raise ValueError("Unsigned 4/8-byte integers currently not "
+ "allowed when writing using PLIO "
+ "tile compression")
+
+
+def _extract_table_type(type):
+ """
+ Get the numerical table type
+ """
+ if isinstance(type, str):
+ type = type.lower()
+ if type[0:7] == 'binary':
+ table_type = BINARY_TBL
+ elif type[0:6] == 'ascii':
+ table_type = ASCII_TBL
+ else:
+ raise ValueError(
+ "table type string should begin with 'binary' or 'ascii' "
+ "(case insensitive)")
+ else:
+ type = int(type)
+ if type not in [BINARY_TBL, ASCII_TBL]:
+ raise ValueError(
+ "table type num should be BINARY_TBL (%d) or "
+ "ASCII_TBL (%d)" % (BINARY_TBL, ASCII_TBL))
+ table_type = type
+
+ return table_type
+
+
+_compress_map = {
+ None: NOCOMPRESS,
+ 'RICE': RICE_1,
+ 'RICE_1': RICE_1,
+ 'GZIP': GZIP_1,
+ 'GZIP_1': GZIP_1,
+ 'GZIP_2': GZIP_2,
+ 'PLIO': PLIO_1,
+ 'PLIO_1': PLIO_1,
+ 'HCOMPRESS': HCOMPRESS_1,
+ 'HCOMPRESS_1': HCOMPRESS_1,
+ NOCOMPRESS: None,
+ RICE_1: 'RICE_1',
+ GZIP_1: 'GZIP_1',
+ GZIP_2: 'GZIP_2',
+ PLIO_1: 'PLIO_1',
+ HCOMPRESS_1: 'HCOMPRESS_1',
+}
+
+_qmethod_map = {
+ None: NO_DITHER,
+ 'NO_DITHER': NO_DITHER,
+ 'SUBTRACTIVE_DITHER_1': SUBTRACTIVE_DITHER_1,
+ 'SUBTRACTIVE_DITHER_2': SUBTRACTIVE_DITHER_2,
+ NO_DITHER: NO_DITHER,
+ SUBTRACTIVE_DITHER_1: SUBTRACTIVE_DITHER_1,
+ SUBTRACTIVE_DITHER_2: SUBTRACTIVE_DITHER_2,
+}
+
+_modeprint_map = {
+ 'r': 'READONLY', 'rw': 'READWRITE', 0: 'READONLY', 1: 'READWRITE'}
+_char_modemap = {
+ 'r': 'r', 'rw': 'rw',
+ READONLY: 'r', READWRITE: 'rw'}
+_int_modemap = {
+ 'r': READONLY, 'rw': READWRITE, READONLY: READONLY, READWRITE: READWRITE}
--- /dev/null
+from .base import ( # noqa
+ ANY_HDU, BINARY_TBL, ASCII_TBL, IMAGE_HDU, _hdu_type_map)
+from .image import ImageHDU # noqa
+from .table import ( # noqa
+ TableHDU,
+ AsciiTableHDU,
+ _table_npy2fits_form,
+ _npy2fits,
+)
--- /dev/null
+import re
+import copy
+import warnings
+
+from ..util import _stypes, _itypes, _ftypes, FITSRuntimeWarning
+from ..header import FITSHDR
+
+INVALID_HDR_CHARS_RE = re.compile(r"(\?|\*|#)+")
+INVALID_HDR_CHARS = {"?", "*", "#"}
+ANY_HDU = -1
+IMAGE_HDU = 0
+ASCII_TBL = 1
+BINARY_TBL = 2
+
+_hdu_type_map = {
+ IMAGE_HDU: 'IMAGE_HDU',
+ ASCII_TBL: 'ASCII_TBL',
+ BINARY_TBL: 'BINARY_TBL',
+ 'IMAGE_HDU': IMAGE_HDU,
+ 'ASCII_TBL': ASCII_TBL,
+ 'BINARY_TBL': BINARY_TBL}
+
+
+class HDUBase(object):
+ """
+ A representation of a FITS HDU
+
+ parameters
+ ----------
+ fits: FITS object
+ An instance of a _fitsio_wrap.FITS object. This is the low-level
+ python object, not the FITS object defined in fitslib.
+ ext: integer
+ The extension number.
+ """
+ def __init__(self, fits, ext, **keys):
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ self._FITS = fits
+ self._ext = ext
+ self._ignore_scaling = False
+
+ self._update_info()
+ self._filename = self._FITS.filename()
+
+ @property
+ def ignore_scaling(self):
+ """
+ :return: Flag to indicate whether scaling (BZERO/BSCALE) values should
+ be ignored.
+ """
+ return self._ignore_scaling
+
+ @ignore_scaling.setter
+ def ignore_scaling(self, ignore_scaling_flag):
+ """
+ Set the flag to ignore scaling.
+ """
+ old_val = self._ignore_scaling
+ self._ignore_scaling = ignore_scaling_flag
+
+ # Only incur the overhead of updating the info if the new value is
+ # actually different.
+ if old_val != self._ignore_scaling:
+ self._update_info()
+
+ def get_extnum(self):
+ """
+ Get the extension number
+ """
+ return self._ext
+
+ def get_extname(self):
+ """
+ Get the name for this extension, can be an empty string
+ """
+ name = self._info['extname']
+ if name.strip() == '':
+ name = self._info['hduname']
+ return name.strip()
+
+ def get_extver(self):
+ """
+ Get the version for this extension.
+
+ Used when a name is given to multiple extensions
+ """
+ ver = self._info['extver']
+ if ver == 0:
+ ver = self._info['hduver']
+ return ver
+
+ def get_exttype(self, num=False):
+ """
+ Get the extension type
+
+ By default the result is a string that mirrors
+ the enumerated type names in cfitsio
+ 'IMAGE_HDU', 'ASCII_TBL', 'BINARY_TBL'
+ which have numeric values
+ 0 1 2
+ send num=True to get the numbers. The values
+ fitsio.IMAGE_HDU .ASCII_TBL, and .BINARY_TBL
+ are available for comparison
+
+ parameters
+ ----------
+ num: bool, optional
+ Return the numeric values.
+ """
+ if num:
+ return self._info['hdutype']
+ else:
+ name = _hdu_type_map[self._info['hdutype']]
+ return name
+
+ def get_offsets(self):
+ """
+ returns
+ -------
+ a dictionary with these entries
+
+ header_start:
+ byte offset from beginning of the file to the start
+ of the header
+ data_start:
+ byte offset from beginning of the file to the start
+ of the data section
+ data_end:
+ byte offset from beginning of the file to the end
+ of the data section
+
+ Note these are also in the information dictionary, which
+ you can access with get_info()
+ """
+ return dict(
+ header_start=self._info['header_start'],
+ data_start=self._info['data_start'],
+ data_end=self._info['data_end'],
+ )
+
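+ # For example, the total on-disk size of this HDU can be computed
+ # from these offsets (illustrative):
+ #
+ # offs = hdu.get_offsets()
+ # nbytes = offs['data_end'] - offs['header_start']
+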
+ def get_info(self):
+ """
+ Get a copy of the internal dictionary holding extension information
+ """
+ return copy.deepcopy(self._info)
+
+ def get_filename(self):
+ """
+ Get a copy of the filename for this fits file
+ """
+ return copy.copy(self._filename)
+
+ def write_checksum(self):
+ """
+ Write the checksum into the header for this HDU.
+
+ Computes the checksum for the HDU, both the data portion alone (DATASUM
+ keyword) and the checksum complement for the entire HDU (CHECKSUM).
+
+ returns
+ -------
+ A dict with keys 'datasum' and 'hdusum'
+ """
+ return self._FITS.write_checksum(self._ext+1)
+
+ def verify_checksum(self):
+ """
+ Verify the checksum in the header for this HDU.
+ """
+ res = self._FITS.verify_checksum(self._ext+1)
+ if res['dataok'] != 1:
+ raise ValueError("data checksum failed")
+ if res['hduok'] != 1:
+ raise ValueError("hdu checksum failed")
+
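+ # Checksum round-trip sketch (illustrative):
+ #
+ # hdu.write_checksum() # writes DATASUM and CHECKSUM keywords
+ # hdu.verify_checksum() # raises ValueError if either check fails
+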
+ def write_comment(self, comment):
+ """
+ Write a comment into the header
+ """
+ self._FITS.write_comment(self._ext+1, str(comment))
+
+ def write_history(self, history):
+ """
+ Write history text into the header
+ """
+ self._FITS.write_history(self._ext+1, str(history))
+
+ def _write_continue(self, value):
+ """
+ Write a CONTINUE card into the header
+ """
+ self._FITS.write_continue(self._ext+1, str(value))
+
+ def write_key(self, name, value, comment=""):
+ """
+ Write the input value to the header
+
+ parameters
+ ----------
+ name: string
+ Name of keyword to write/update
+ value: scalar
+ Value to write, can be string float or integer type,
+ including numpy scalar types.
+ comment: string, optional
+ An optional comment to write for this key
+
+ Notes
+ -----
+ Write COMMENT and HISTORY using the write_comment and write_history
+ methods
+ """
+
+ if name is None:
+
+ # we write a blank keyword and the rest is a comment
+ # string
+
+ if not isinstance(comment, _stypes):
+ raise ValueError('when writing a blank key the comment '
+ 'must be a string')
+
+ # this might be longer than 80 but that's ok, the routine
+ # will take care of it
+ card = ' ' + str(comment)
+ self._FITS.write_record(
+ self._ext+1,
+ card,
+ )
+
+ elif value is None:
+ self._FITS.write_undefined_key(self._ext+1,
+ str(name),
+ str(comment))
+
+ elif isinstance(value, bool):
+ if value:
+ v = 1
+ else:
+ v = 0
+ self._FITS.write_logical_key(self._ext+1,
+ str(name),
+ v,
+ str(comment))
+ elif isinstance(value, _stypes):
+ self._FITS.write_string_key(self._ext+1,
+ str(name),
+ str(value),
+ str(comment))
+ elif isinstance(value, _ftypes):
+ self._FITS.write_double_key(self._ext+1,
+ str(name),
+ float(value),
+ str(comment))
+ elif isinstance(value, _itypes):
+ self._FITS.write_long_long_key(self._ext+1,
+ str(name),
+ int(value),
+ str(comment))
+ elif isinstance(value, (tuple, list)):
+ vl = [str(el) for el in value]
+ sval = ','.join(vl)
+ self._FITS.write_string_key(self._ext+1,
+ str(name),
+ sval,
+ str(comment))
+ else:
+ sval = str(value)
+ mess = (
+ "warning, keyword '%s' has non-standard "
+ "value type %s, "
+ "converting to string: '%s'")
+ warnings.warn(mess % (name, type(value), sval), FITSRuntimeWarning)
+ self._FITS.write_string_key(self._ext+1,
+ str(name),
+ sval,
+ str(comment))
+
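+ # Dispatch sketch for write_key (illustrative); the python type of
+ # the value selects the low-level writer used above:
+ #
+ # hdu.write_key('OBJECT', 'M31') # string key
+ # hdu.write_key('EXPTIME', 300.0, comment='seconds') # double key
+ # hdu.write_key('NCOADD', 4) # long long key
+ # hdu.write_key('CALIBF', True) # logical key
+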
+ def write_keys(self, records_in, clean=True):
+ """
+ Write the keywords to the header.
+
+ parameters
+ ----------
+ records: FITSHDR or list or dict
+ Can be one of these:
+ - FITSHDR object
+ - list of dictionaries containing 'name','value' and optionally
+ a 'comment' field; the order is preserved.
+ - a dictionary of keyword-value pairs; no comments are written
+ in this case, and the order is arbitrary.
+ clean: boolean
+ If True, trim out the standard fits header keywords that are
+ created on HDU creation, such as EXTEND, SIMPLE, TTYPE, TFORM,
+ TDIM, XTENSION, BITPIX, NAXIS, etc.
+
+ Notes
+ -----
+ Input keys named COMMENT and HISTORY are written using the
+ write_comment and write_history methods.
+ """
+
+ if isinstance(records_in, FITSHDR):
+ hdr = records_in
+ else:
+ hdr = FITSHDR(records_in)
+
+ if clean:
+ is_table = hasattr(self, '_table_type_str')
+ # is_table = isinstance(self, TableHDU)
+ hdr.clean(is_table=is_table)
+
+ for r in hdr.records():
+ name = r['name']
+ if name is not None:
+ name = name.upper()
+
+ if INVALID_HDR_CHARS_RE.search(name):
+ raise RuntimeError(
+ "header key '%s' has invalid characters! "
+ "Characters in %s are not allowed!" % (
+ name, INVALID_HDR_CHARS
+ )
+ )
+
+ value = r['value']
+
+ if name == 'COMMENT':
+ self.write_comment(value)
+ elif name == 'HISTORY':
+ self.write_history(value)
+ elif name == 'CONTINUE':
+ self._write_continue(value)
+ else:
+ comment = r.get('comment', '')
+ self.write_key(name, value, comment=comment)
+
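+ # write_keys sketch (illustrative); a list of dicts preserves order
+ # and allows comments, while a plain dict does not:
+ #
+ # hdu.write_keys([
+ # {'name': 'FILTER', 'value': 'r', 'comment': 'band'},
+ # {'name': 'HISTORY', 'value': 'reduced with fitsio'},
+ # ])
+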
+ def read_header(self):
+ """
+ Read the header as a FITSHDR
+
+ The FITSHDR allows access to the values and comments by name and
+ number.
+ """
+ # note converting strings
+ return FITSHDR(self.read_header_list())
+
+ def read_header_list(self):
+ """
+ Read the header as a list of dictionaries.
+
+ You will usually use read_header instead, which just sends the output
+ of this function to the constructor of a FITSHDR, which allows access
+ to the values and comments by name and number.
+
+ Each dictionary is
+ 'name': the keyword name
+ 'value': the value field as a string
+ 'comment': the comment field as a string.
+ """
+ return self._FITS.read_header(self._ext+1)
+
+ def _update_info(self):
+ """
+ Update metadata for this HDU
+ """
+ try:
+ self._FITS.movabs_hdu(self._ext+1)
+ except IOError:
+ raise RuntimeError("no such hdu")
+
+ self._info = self._FITS.get_hdu_info(self._ext+1, self._ignore_scaling)
+
+ def _get_repr_list(self):
+ """
+ Get some representation data common to all HDU types
+ """
+ spacing = ' '*2
+ text = ['']
+ text.append("%sfile: %s" % (spacing, self._filename))
+ text.append("%sextension: %d" % (spacing, self._info['hdunum']-1))
+ text.append(
+ "%stype: %s" % (spacing, _hdu_type_map[self._info['hdutype']]))
+
+ extname = self.get_extname()
+ if extname != "":
+ text.append("%sextname: %s" % (spacing, extname))
+ extver = self.get_extver()
+ if extver != 0:
+ text.append("%sextver: %s" % (spacing, extver))
+
+ return text, spacing
--- /dev/null
+"""
+image HDU classes for fitslib, part of the fitsio package.
+
+See the main docs at https://github.com/esheldon/fitsio
+
+ Copyright (C) 2011 Erin Sheldon, BNL. erin dot sheldon at gmail dot com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+from __future__ import with_statement, print_function
+from functools import reduce
+
+import numpy
+
+from math import floor
+from .base import HDUBase, IMAGE_HDU
+from ..util import IS_PY3, array_to_native, copy_if_needed
+
+# for python3 compat
+if IS_PY3:
+ xrange = range
+
+
+class ImageHDU(HDUBase):
+ def _update_info(self):
+ """
+ Call parent method and make sure this is in fact a
+ image HDU. Set dims in C order
+ """
+ super(ImageHDU, self)._update_info()
+
+ if self._info['hdutype'] != IMAGE_HDU:
+ mess = "Extension %s is not an Image HDU" % self._ext
+ raise ValueError(mess)
+
+ # convert to c order
+ if 'dims' in self._info:
+ self._info['dims'] = list(reversed(self._info['dims']))
+
+ def has_data(self):
+ """
+ Determine if this HDU has any data
+
+ For images, check that the dimensions are not zero.
+
+ For tables, check that the row count is not zero
+ """
+ ndims = self._info.get('ndims', 0)
+ if ndims == 0:
+ return False
+ else:
+ return True
+
+ def is_compressed(self):
+ """
+ returns True if this extension is compressed
+ """
+ return self._info['is_compressed_image'] == 1
+
+ def get_comptype(self):
+ """
+ Get the compression type.
+
+ None if the image is not compressed.
+ """
+ return self._info['comptype']
+
+ def get_dims(self):
+ """
+ get the shape of the image. Returns () for empty
+ """
+ if self._info['ndims'] != 0:
+ dims = self._info['dims']
+ else:
+ dims = ()
+
+ return dims
+
+ def reshape(self, dims):
+ """
+ reshape an existing image to the requested dimensions
+
+ parameters
+ ----------
+ dims: sequence
+ Any sequence convertible to i8
+ """
+
+ adims = numpy.array(dims, ndmin=1, dtype='i8')
+ self._FITS.reshape_image(self._ext+1, adims)
+
+ def write(self, img, start=0, **keys):
+ """
+ Write the image into this HDU
+
+ If data already exist in this HDU, they will be overwritten. If the
+ image to write is larger than the image on disk, or if the start
+ position is such that the write would extend beyond the existing
+ dimensions, the on-disk image is expanded.
+
+ parameters
+ ----------
+ img: ndarray
+ A simple numpy ndarray
+ start: integer or sequence
+ Where to start writing data. Can be an integer offset
+ into the entire array, or a sequence determining where
+ in N-dimensional space to start.
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ dims = self.get_dims()
+
+ if img.dtype.fields is not None:
+ raise ValueError("got recarray, expected regular ndarray")
+ if img.size == 0:
+ raise ValueError("data must have at least 1 row")
+
+ # data must be c-contiguous and native byte order
+ if not img.flags['C_CONTIGUOUS']:
+ # this always makes a copy
+ img_send = numpy.ascontiguousarray(img)
+ array_to_native(img_send, inplace=True)
+ else:
+ img_send = array_to_native(img, inplace=False)
+
+ if IS_PY3 and img_send.dtype.char == 'U':
+ # for python3, we convert unicode to ascii
+ # this will error if the character is not in ascii
+ img_send = img_send.astype('S', copy=copy_if_needed)
+
+ if not numpy.isscalar(start):
+ # convert to scalar offset
+ # note we use the on-disk data type to get itemsize
+
+ offset = _convert_full_start_to_offset(dims, start)
+ else:
+ offset = start
+
+ # see if we need to resize the image
+ if self.has_data():
+ self._expand_if_needed(dims, img.shape, start, offset)
+
+ self._FITS.write_image(self._ext+1, img_send, offset+1)
+ self._update_info()
+
+ def read(self, **keys):
+ """
+ Read the image.
+
+ If the HDU is an IMAGE_HDU, read the corresponding image. Compression
+ and scaling are dealt with properly.
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if not self.has_data():
+ return None
+
+ dtype, shape = self._get_dtype_and_shape()
+ array = numpy.zeros(shape, dtype=dtype)
+ self._FITS.read_image(self._ext+1, array)
+ return array
+
+ def _get_dtype_and_shape(self):
+ """
+ Get the numpy dtype and shape for image
+ """
+ npy_dtype = self._get_image_numpy_dtype()
+
+ if self._info['ndims'] != 0:
+ shape = self._info['dims']
+ else:
+ raise IOError("no image present in HDU")
+
+ return npy_dtype, shape
+
+ def _get_image_numpy_dtype(self):
+ """
+ Get the numpy dtype for the image
+ """
+ try:
+ ftype = self._info['img_equiv_type']
+ npy_type = _image_bitpix2npy[ftype]
+ except KeyError:
+ raise KeyError("unsupported fits data type: %d" % ftype)
+
+ return npy_type
+
+ def __getitem__(self, arg):
+ """
+ Get data from an image using python [] slice notation.
+
+ e.g., [2:25, 4:45].
+ """
+ return self._read_image_slice(arg)
+
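+ # Slice-read sketch (illustrative); only the requested pixels are
+ # read from disk via read_image_slice:
+ #
+ # sub = hdu[2:25, 4:45] # rows 2..24, columns 4..44
+ # row = hdu[5, :] # an integer becomes a length-1 slice
+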
+ def _read_image_slice(self, arg):
+ """
+ workhorse to read a slice
+ """
+ if 'ndims' not in self._info:
+ raise ValueError("Attempt to slice empty extension")
+
+ if isinstance(arg, slice):
+ # one-dimensional, e.g. 2:20
+ return self._read_image_slice((arg,))
+
+ if not isinstance(arg, tuple):
+ raise ValueError("arguments must be slices, one for each "
+ "dimension, e.g. [2:5] or [2:5,8:25] etc.")
+
+ # should be a tuple of slices, one for each dimension
+ # e.g. [2:3, 8:100]
+ nd = len(arg)
+ if nd != self._info['ndims']:
+ raise ValueError("Got slice dimensions %d, "
+ "expected %d" % (nd, self._info['ndims']))
+
+ targ = arg
+ arg = []
+ for a in targ:
+ if isinstance(a, slice):
+ arg.append(a)
+ elif isinstance(a, int):
+ arg.append(slice(a, a+1, 1))
+ else:
+ raise ValueError("arguments must be slices, e.g. 2:12")
+
+ dims = self._info['dims']
+ arrdims = []
+ first = []
+ last = []
+ steps = []
+ npy_dtype = self._get_image_numpy_dtype()
+
+ # check the args and reverse dimensions since
+ # fits is backwards from numpy
+ dim = 0
+ for slc in arg:
+ start = slc.start
+ stop = slc.stop
+ step = slc.step
+
+ if start is None:
+ start = 0
+ if stop is None:
+ stop = dims[dim]
+ if step is None:
+ # Ensure sane defaults.
+ if start <= stop:
+ step = 1
+ else:
+ step = -1
+
+ # Sanity checks for proper syntax.
+ if ((step > 0 and stop < start)
+ or (step < 0 and start < stop)
+ or (start == stop)):
+ return numpy.empty(0, dtype=npy_dtype)
+
+ if start < 0:
+ start = dims[dim] + start
+ if start < 0:
+ raise IndexError("Index out of bounds")
+
+ if stop < 0:
+ # negative stop counts from the end; convert to the
+ # 1-offset inclusive last pixel used below
+ stop = dims[dim] + stop
+
+ # move to 1-offset
+ start = start + 1
+
+ if stop > dims[dim]:
+ stop = dims[dim]
+
+ if stop < start:
+ # A little black magic here. The stop is offset by 2 to
+ # accommodate the 1-offset of CFITSIO, and to move past the end
+ # pixel to get the complete set after it is flipped along the
+ # axis. Maybe there is a clearer way to accomplish what this
+ # offset is glossing over.
+ # @at88mph 2019.10.10
+ stop = stop + 2
+
+ first.append(start)
+ last.append(stop)
+
+ # Negative step values are not used in CFITSIO as the dimension is
+ # already properly calculated.
+ # @at88mph 2019.10.21
+ steps.append(abs(step))
+ arrdims.append(int(floor((stop - start) / step)) + 1)
+
+ dim += 1
+
+ first.reverse()
+ last.reverse()
+ steps.reverse()
+ first = numpy.array(first, dtype='i8')
+ last = numpy.array(last, dtype='i8')
+ steps = numpy.array(steps, dtype='i8')
+
+ array = numpy.zeros(arrdims, dtype=npy_dtype)
+ self._FITS.read_image_slice(self._ext+1, first, last, steps,
+ self._ignore_scaling, array)
+ return array
+
+ def _expand_if_needed(self, dims, write_dims, start, offset):
+ """
+ expand the on-disk image if the intended write will extend
+ beyond the existing dimensions
+ """
+ from operator import mul
+
+ if numpy.isscalar(start):
+ start_is_scalar = True
+ else:
+ start_is_scalar = False
+
+ existing_size = reduce(mul, dims, 1)
+ required_size = offset + reduce(mul, write_dims, 1)
+
+ if required_size > existing_size:
+ # we need to expand the image
+ ndim = len(dims)
+ idim = len(write_dims)
+
+ if start_is_scalar:
+ if start == 0:
+ start = [0]*ndim
+ else:
+ raise ValueError(
+ "When expanding "
+ "an existing image while writing, the start keyword "
+ "must have the same number of dimensions "
+ "as the image or be exactly 0, got %s " % start)
+
+ if idim != ndim:
+ raise ValueError(
+ "When expanding "
+ "an existing image while writing, the input image "
+ "must have the same number of dimensions "
+ "as the original. "
+ "Got %d instead of %d" % (idim, ndim))
+ new_dims = []
+ for i in xrange(ndim):
+ required_dim = start[i] + write_dims[i]
+
+ if required_dim < dims[i]:
+ # careful not to shrink the image!
+ dimsize = dims[i]
+ else:
+ dimsize = required_dim
+
+ new_dims.append(dimsize)
+
+ self.reshape(new_dims)
+
+ def __repr__(self):
+ """
+ Representation for ImageHDU
+ """
+ text, spacing = self._get_repr_list()
+ text.append("%simage info:" % spacing)
+ cspacing = ' '*4
+
+ # need this check for when we haven't written data yet
+ if 'ndims' in self._info:
+ if self._info['comptype'] is not None:
+ text.append(
+ "%scompression: %s" % (cspacing, self._info['comptype']))
+
+ if self._info['ndims'] != 0:
+ dimstr = [str(d) for d in self._info['dims']]
+ dimstr = ",".join(dimstr)
+ else:
+ dimstr = ''
+
+ dt = _image_bitpix2npy[self._info['img_equiv_type']]
+ text.append("%sdata type: %s" % (cspacing, dt))
+ text.append("%sdims: [%s]" % (cspacing, dimstr))
+
+ text = '\n'.join(text)
+ return text
+
+
+def _convert_full_start_to_offset(dims, start):
+ # convert to scalar offset
+ # note we use the on-disk data type to get itemsize
+ ndim = len(dims)
+
+ # convert sequence to pixel start
+ if len(start) != ndim:
+ m = "start has len %d, which does not match requested dims %d"
+ raise ValueError(m % (len(start), ndim))
+
+ # this is really strides / itemsize
+ strides = [1]
+ for i in xrange(1, ndim):
+ strides.append(strides[i-1] * dims[ndim-i])
+
+ strides.reverse()
+ s = start
+ start_index = sum([s[i]*strides[i] for i in xrange(ndim)])
+
+ return start_index
+
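+# For example, with dims = [10, 10] (C order) and start = [2, 3], the
+# strides are [10, 1], so the scalar offset is 2*10 + 3*1 = 23.
+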
+
+# remember, you should be using the equivalent image type for this
+_image_bitpix2npy = {
+ 8: 'u1',
+ 10: 'i1',
+ 16: 'i2',
+ 20: 'u2',
+ 32: 'i4',
+ 40: 'u4',
+ 64: 'i8',
+ -32: 'f4',
+ -64: 'f8'}
--- /dev/null
+"""
+table HDU classes for fitslib, part of the fitsio package.
+
+See the main docs at https://github.com/esheldon/fitsio
+
+ Copyright (C) 2011 Erin Sheldon, BNL. erin dot sheldon at gmail dot com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+from __future__ import with_statement, print_function
+import copy
+import warnings
+from functools import reduce
+
+import numpy as np
+
+from ..util import (
+ IS_PY3,
+ isstring,
+ isinteger,
+ is_object,
+ fields_are_object,
+ array_to_native,
+ array_to_native_c,
+ FITSRuntimeWarning,
+ mks,
+ copy_if_needed,
+)
+from .base import HDUBase, ASCII_TBL, IMAGE_HDU, _hdu_type_map
+
+# for python3 compat
+if IS_PY3:
+ xrange = range
+
+
+class TableHDU(HDUBase):
+ """
+ A table HDU
+
+ parameters
+ ----------
+ fits: FITS object
+ An instance of a _fitsio_wrap.FITS object. This is the low-level
+ python object, not the FITS object defined in fitslib.
+ ext: integer
+ The extension number.
+ lower: bool, optional
+ If True, force all columns names to lower case in output
+ upper: bool, optional
+ If True, force all columns names to upper case in output
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Default is False.
+ vstorage: string, optional
+ Set the default method to store variable length columns. Can be
+ 'fixed' or 'object'. See docs on fitsio.FITS for details.
+ case_sensitive: bool, optional
+ Match column names and extension names with case-sensitivity. Default
+ is False.
+ iter_row_buffer: integer
+ Number of rows to buffer when iterating over table HDUs.
+ Default is 1.
+ write_bitcols: bool, optional
+ If True, write logicals as bit columns. Default is False.
+ """
+ def __init__(self, fits, ext,
+ lower=False, upper=False, trim_strings=False,
+ vstorage='fixed', case_sensitive=False, iter_row_buffer=1,
+ write_bitcols=False, **keys):
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ # NOTE: The defaults of False above cannot be changed since they
+ # are or'ed with the method defaults below.
+ super(TableHDU, self).__init__(fits, ext)
+
+ self.lower = lower
+ self.upper = upper
+ self.trim_strings = trim_strings
+
+ self._vstorage = vstorage
+ self.case_sensitive = case_sensitive
+ self._iter_row_buffer = iter_row_buffer
+ self.write_bitcols = write_bitcols
+
+ if self._info['hdutype'] == ASCII_TBL:
+ self._table_type_str = 'ascii'
+ else:
+ self._table_type_str = 'binary'
+
+ def get_nrows(self):
+ """
+ Get number of rows in the table.
+ """
+ nrows = self._info.get('nrows', None)
+ if nrows is None:
+ raise ValueError("nrows not in info table; this is a bug")
+ return nrows
+
+ def get_colnames(self):
+ """
+ Get a copy of the column names for a table HDU
+ """
+ return copy.copy(self._colnames)
+
+ def get_colname(self, colnum):
+ """
+ Get the name associated with the given column number
+
+ parameters
+ ----------
+ colnum: integer
+ The number for the column, zero offset
+ """
+ if colnum < 0 or colnum > (len(self._colnames)-1):
+ raise ValueError(
+ "colnum out of range [0,%s-1]" % len(self._colnames)
+ )
+ return self._colnames[colnum]
+
+ def get_vstorage(self):
+ """
+ Get a string representing the storage method for variable length
+ columns
+ """
+ return copy.copy(self._vstorage)
+
+ def has_data(self):
+ """
+ Determine if this HDU has any data
+
+ Check that the row count is not zero
+ """
+ if self._info['nrows'] > 0:
+ return True
+ else:
+ return False
+
+ def where(self, expression, firstrow=None, lastrow=None):
+ """
+ Return the indices where the expression evaluates to true.
+
+ parameters
+ ----------
+ expression: string
+ A fits row selection expression. E.g.
+ "x > 3 && y < 5"
+ firstrow, lastrow : int
+ Range of rows for evaluation. This follows the Python list
+ slice convention that the last element is not included.
+ """
+ if firstrow is None:
+ firstrow = 0
+ elif firstrow < 0:
+ raise ValueError('firstrow cannot be negative')
+ if lastrow is None:
+ lastrow = self._info['nrows']
+ elif lastrow < firstrow:
+ raise ValueError('lastrow cannot be less than firstrow')
+ elif lastrow > self._info['nrows']:
+ raise ValueError('lastrow cannot be greater than nrows')
+ nrows = lastrow - firstrow
+ return self._FITS.where(self._ext+1, expression, firstrow+1, nrows)
+
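+ # Row-selection sketch (illustrative); the expression uses the
+ # cfitsio row filter syntax:
+ #
+ # w = hdu.where('x > 3 && y < 5') # evaluate over all rows
+ # w = hdu.where('x > 3', firstrow=0, lastrow=100)
+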
+ def write(self, data, firstrow=0, columns=None, names=None, slow=False,
+ **keys):
+ """
+ Write data into this HDU
+
+ parameters
+ ----------
+ data: ndarray or list of ndarray
+ A numerical python array. Should be an ordinary array for image
+ HDUs, should have fields for tables. To write an ordinary array to
+ a column in a table HDU, use write_column. If data already exists
+ in this HDU, it will be overwritten. See the append() method to
+ append new rows to a table HDU.
+ firstrow: integer, optional
+ At which row you should begin writing to tables. Be sure you know
+ what you are doing! For appending see the append() method.
+ Default 0.
+ columns: list, optional
+ If data is a list of arrays, you must send columns as a list
+ of names or column numbers. You can also use the `names` keyword
+ argument.
+ names: list, optional
+ If data is a list of arrays, you must send columns as a list
+ of names or column numbers. You can also use the `columns` keyword
+ argument.
+ slow: bool, optional
+ If True, use a slower method to write one column at a time. Useful
+ for debugging.
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ isrec = False
+ if isinstance(data, (list, dict)):
+ if isinstance(data, list):
+ data_list = data
+ if columns is not None:
+ columns_all = columns
+ elif names is not None:
+ columns_all = names
+ else:
+ raise ValueError(
+ "you must send `columns` or `names` "
+ "with a list of arrays")
+ else:
+ columns_all = list(data.keys())
+ data_list = [data[n] for n in columns_all]
+
+ colnums_all = [self._extract_colnum(c) for c in columns_all]
+ names = [self.get_colname(c) for c in colnums_all]
+
+ isobj = np.zeros(len(data_list), dtype=bool)
+ for i in xrange(len(data_list)):
+ isobj[i] = is_object(data_list[i])
+
+ else:
+ if data.dtype.fields is None:
+ raise ValueError("You are writing to a table, so I expected "
+ "an array with fields as input. If you want "
+ "to write a simple array, you should use "
+ "write_column to write to a single column, "
+ "or instead write to an image hdu")
+
+ if data.shape == ():
+ raise ValueError("cannot write data with shape ()")
+
+ isrec = True
+ names = data.dtype.names
+ # only write object types (variable-length columns) after
+ # writing the main table
+ isobj = fields_are_object(data)
+
+ data_list = []
+ colnums_all = []
+ for i, name in enumerate(names):
+ colnum = self._extract_colnum(name)
+ data_list.append(data[name])
+ colnums_all.append(colnum)
+
+ if slow:
+ for i, name in enumerate(names):
+ if not isobj[i]:
+ self.write_column(name, data_list[i], firstrow=firstrow)
+ else:
+
+ nonobj_colnums = []
+ nonobj_arrays = []
+ for i in xrange(len(data_list)):
+ if not isobj[i]:
+ nonobj_colnums.append(colnums_all[i])
+ if isrec:
+ # this still leaves the possibility of f-order sub-arrays
+ colref = array_to_native(data_list[i], inplace=False)
+ else:
+ colref = array_to_native_c(data_list[i], inplace=False)
+
+ if IS_PY3 and colref.dtype.char == 'U':
+ # for python3, we convert unicode to ascii
+ # this will error if the character is not in ascii
+ colref = colref.astype('S', copy=copy_if_needed)
+
+ nonobj_arrays.append(colref)
+
+ for tcolnum, tdata in zip(nonobj_colnums, nonobj_arrays):
+ self._verify_column_data(tcolnum, tdata)
+
+ if len(nonobj_arrays) > 0:
+ self._FITS.write_columns(
+ self._ext+1, nonobj_colnums, nonobj_arrays,
+ firstrow=firstrow+1, write_bitcols=self.write_bitcols)
+
+ # writing the object arrays always occurs the same way
+ # need to make sure this works for array fields
+ for i, name in enumerate(names):
+ if isobj[i]:
+ self.write_var_column(name, data_list[i], firstrow=firstrow)
+
+ self._update_info()
+
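+ # Table write sketch (illustrative); recarray, dict and list inputs
+ # are all handled above:
+ #
+ # hdu.write(recdata) # fields define the columns
+ # hdu.write({'x': xcol, 'y': ycol}) # dict keys name the columns
+ # hdu.write([xcol, ycol], names=['x', 'y']) # a list requires names=
+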
+ def write_column(self, column, data, firstrow=0, **keys):
+ """
+ Write data to a column in this HDU
+
+ This HDU must be a table HDU.
+
+ parameters
+ ----------
+ column: scalar string/integer
+ The column in which to write. Can be the name or number (0 offset)
+ data: ndarray
+ Numerical python array to write. This should match the
+ shape of the column. You are probably better using
+ fits.write_table() to be sure.
+ firstrow: integer, optional
+ At which row you should begin writing. Be sure you know what you
+ are doing! For appending see the append() method. Default 0.
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ colnum = self._extract_colnum(column)
+
+ # need it to be contiguous and native byte order. For now, make a
+ # copy. but we may be able to avoid this with some care.
+
+ if not data.flags['C_CONTIGUOUS']:
+ # this always makes a copy
+ data_send = np.ascontiguousarray(data)
+ # this is a copy, we can make sure it is native
+ # and modify in place if needed
+ array_to_native(data_send, inplace=True)
+ else:
+ # we can avoid the copy with a try-finally block and
+ # some logic
+ data_send = array_to_native(data, inplace=False)
+
+ if IS_PY3 and data_send.dtype.char == 'U':
+ # for python3, we convert unicode to ascii
+ # this will error if the character is not in ascii
+ data_send = data_send.astype('S', copy=copy_if_needed)
+
+ self._verify_column_data(colnum, data_send)
+
+ self._FITS.write_columns(
+ self._ext+1,
+ [colnum],
+ [data_send],
+ firstrow=firstrow+1,
+ write_bitcols=self.write_bitcols,
+ )
+
+ del data_send
+ self._update_info()
+
+ def _verify_column_data(self, colnum, data):
+ """
+ verify the input data is of the correct type and shape
+ """
+ this_dt = data.dtype.descr[0]
+
+ if len(data.shape) > 2:
+ this_shape = data.shape[1:]
+ elif len(data.shape) == 2 and data.shape[1] > 1:
+ this_shape = data.shape[1:]
+ else:
+ this_shape = ()
+
+ this_npy_type = this_dt[1][1:]
+
+ npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+ info = self._info['colinfo'][colnum]
+
+ if npy_type[0] in ['>', '<', '|']:
+ npy_type = npy_type[1:]
+
+ col_name = info['name']
+ col_tdim = info['tdim']
+ col_shape = _tdim2shape(
+ col_tdim, col_name, is_string=(npy_type[0] == 'S'))
+
+ if col_shape is None:
+ if this_shape == ():
+ this_shape = None
+
+ if col_shape is not None and not isinstance(col_shape, tuple):
+ col_shape = (col_shape,)
+
+ # this mismatch is OK
+ if npy_type == 'i1' and this_npy_type == 'b1':
+ this_npy_type = 'i1'
+
+ if isinstance(self, AsciiTableHDU):
+ # we don't enforce types exact for ascii
+ if npy_type == 'i8' and this_npy_type in ['i2', 'i4']:
+ this_npy_type = 'i8'
+ elif npy_type == 'f8' and this_npy_type == 'f4':
+ this_npy_type = 'f8'
+
+ if this_npy_type != npy_type:
+ raise ValueError(
+ "bad input data for column '%s': "
+ "expected '%s', got '%s'" % (
+ col_name, npy_type, this_npy_type))
+
+ if this_shape != col_shape:
+ raise ValueError(
+ "bad input shape for column '%s': "
+ "expected '%s', got '%s'" % (col_name, col_shape, this_shape))
+
+ def write_var_column(self, column, data, firstrow=0, **keys):
+ """
+ Write data to a variable-length column in this HDU
+
+ This HDU must be a table HDU.
+
+ parameters
+ ----------
+ column: scalar string/integer
+ The column in which to write. Can be the name or number (0 offset)
+ column: ndarray
+ Numerical python array to write. This must be an object array.
+ firstrow: integer, optional
+ At which row you should begin writing. Be sure you know what you
+ are doing! For appending see the append() method. Default 0.
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if not is_object(data):
+ raise ValueError("Only object fields can be written to "
+ "variable-length arrays")
+ colnum = self._extract_colnum(column)
+
+ self._FITS.write_var_column(self._ext+1, colnum+1, data,
+ firstrow=firstrow+1)
+ self._update_info()
+
+ def insert_column(self, name, data, colnum=None, write_bitcols=None,
+ **keys):
+ """
+ Insert a new column.
+
+ parameters
+ ----------
+ name: string
+ The column name
+ data:
+ The data to write into the new column.
+ colnum: int, optional
+ The column number for the new column, zero-offset. Default
+ is to add the new column after the existing ones.
+ write_bitcols: bool, optional
+ If set, write logicals as bit cols. This can over-ride the
+ internal class setting. The default of None respects the
+ internal class setting.
+
+ Notes
+ -----
+ This method is used unmodified by ascii tables as well.
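+
+ examples
+ --------
+ # a sketch: add a 'weight' column after the existing ones;
+ # assumes numpy is imported as np
+ with fitsio.FITS(fname, 'rw') as fits:
+ hdu = fits['mytable']
+ hdu.insert_column('weight', np.ones(hdu.get_nrows()))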
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if write_bitcols is None:
+ write_bitcols = self.write_bitcols
+
+ if name in self._colnames:
+ raise ValueError("column '%s' already exists" % name)
+
+ if IS_PY3 and data.dtype.char == 'U':
+ # fast dtype conversion using an empty array
+ # we could hack at the actual text description, but using
+ # the numpy API is probably safer
+ # this also avoids doing a dtype conversion on every array
+ # element, which could be expensive
+ descr = np.empty(1).astype(data.dtype).astype('S').dtype.descr
+ else:
+ descr = data.dtype.descr
+
+ if len(descr) > 1:
+ raise ValueError("you can only insert a single column, "
+ "requested: %s" % descr)
+
+ this_descr = descr[0]
+ this_descr = [name, this_descr[1]]
+ if len(data.shape) > 1:
+ this_descr += [data.shape[1:]]
+ this_descr = tuple(this_descr)
+
+ name, fmt, dims = _npy2fits(
+ this_descr,
+ table_type=self._table_type_str,
+ write_bitcols=write_bitcols,
+ )
+ if dims is not None:
+ dims = [dims]
+
+ if colnum is None:
+ new_colnum = len(self._info['colinfo']) + 1
+ else:
+ new_colnum = colnum+1
+
+ self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
+
+ self._update_info()
+
+ self.write_column(name, data)
+
+ def append(self, data, columns=None, names=None, **keys):
+ """
+ Append new rows to a table HDU
+
+ parameters
+ ----------
+ data: ndarray or list of arrays
+ A numerical python array with fields (recarray) or a list of
+ arrays. Should have the same fields as the existing table. If only
+ a subset of the table columns are present, the other columns are
+ filled with zeros.
+ columns: list, optional
+ If data is a list of arrays, you must send columns as a list
+ of names or column numbers. You can also use the `names` keyword
+ argument.
+ names: list, optional
+ If data is a list of arrays, you must send the column names or
+ numbers as a list, either here or via the `columns` keyword
+ argument.
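+
+ examples
+ --------
+ # a sketch; newdata has the same fields as the existing table
+ with fitsio.FITS(fname, 'rw') as fits:
+ fits['mytable'].append(newdata)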
+ """
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ firstrow = self._info['nrows']
+ self.write(data, firstrow=firstrow, columns=columns, names=names)
+
+ def delete_rows(self, rows):
+ """
+ Delete rows from the table
+
+ parameters
+ ----------
+ rows: sequence or slice
+ The exact rows to delete as a sequence, or a slice.
+
+ examples
+ --------
+ # delete a range of rows
+ with fitsio.FITS(fname,'rw') as fits:
+ fits['mytable'].delete_rows(slice(3,20))
+
+ # delete specific rows
+ with fitsio.FITS(fname,'rw') as fits:
+ rows2delete = [3,88,76]
+ fits['mytable'].delete_rows(rows2delete)
+ """
+
+ if rows is None:
+ return
+
+ # extract and convert to 1-offset for C routine
+ if isinstance(rows, slice):
+ rows = self._process_slice(rows)
+ if rows.step is not None and rows.step != 1:
+ rows = np.arange(
+ rows.start+1,
+ rows.stop+1,
+ rows.step,
+ )
+ else:
+ # rows must be 1-offset
+ rows = slice(rows.start+1, rows.stop+1)
+ else:
+ rows, sortind = self._extract_rows(rows, sort=True)
+ # rows must be 1-offset
+ rows += 1
+
+ if isinstance(rows, slice):
+ self._FITS.delete_row_range(self._ext+1, rows.start, rows.stop)
+ else:
+ if rows.size == 0:
+ return
+
+ self._FITS.delete_rows(self._ext+1, rows)
+
+ self._update_info()
+
+ def resize(self, nrows, front=False):
+ """
+ Resize the table to the given size, removing or adding rows as
+ necessary. Note if expanding the table at the end, it is more
+ efficient to use the append function than resizing and then
+ writing.
+
+ Newly added rows are zeroed, except for 'i1', 'u2' and 'u4' data
+ types, which get -128, 32768 and 2147483648 respectively
+
+ parameters
+ ----------
+ nrows: int
+ new size of table
+ front: bool, optional
+ If True, add or remove rows from the front. Default
+ is False
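+
+ examples
+ --------
+ # a sketch: grow the table to 1000 rows; the new trailing rows
+ # are zeroed as described above
+ with fitsio.FITS(fname, 'rw') as fits:
+ fits['mytable'].resize(1000)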
+ """
+
+ nrows_current = self.get_nrows()
+ if nrows == nrows_current:
+ return
+
+ if nrows < nrows_current:
+ rowdiff = nrows_current - nrows
+ if front:
+ # delete from the front
+ start = 0
+ stop = rowdiff
+ else:
+ # delete from the back
+ start = nrows
+ stop = nrows_current
+
+ self.delete_rows(slice(start, stop))
+ else:
+ rowdiff = nrows - nrows_current
+ if front:
+ # in this case zero is what we want, since the code inserts
+ firstrow = 0
+ else:
+ firstrow = nrows_current
+ self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)
+
+ self._update_info()
+
+ def read(self, columns=None, rows=None, vstorage=None,
+ upper=False, lower=False, trim_strings=False, **keys):
+ """
+ Read data from this HDU
+
+ By default, all data are read. You can set the `columns` and/or
+ `rows` keywords to read subsets of the data.
+
+ Table data is read into a numpy recarray. To get a single column as
+ a numpy.ndarray, use the `read_column` method.
+
+ Slice notation is also supported for `TableHDU` types.
+
+ >>> fits = fitsio.FITS(filename)
+ >>> fits[ext][:]
+ >>> fits[ext][2:5]
+ >>> fits[ext][200:235:2]
+ >>> fits[ext][rows]
+ >>> fits[ext][cols][rows]
+
+ parameters
+ ----------
+ columns: optional
+ An optional set of columns to read from table HDUs. Default is to
+ read all. Can be string or number. If a sequence, a recarray
+ is always returned. If a scalar, an ordinary array is returned.
+ rows: optional
+ An optional list of rows to read from table HDUs. Default is to
+ read all.
+ vstorage: string, optional
+ Over-ride the default method to store variable length columns. Can
+ be 'fixed' or 'object'. See docs on fitsio.FITS for details.
+ lower: bool, optional
+ If True, force all column names to lower case in output. Will
+ over-ride the lower= keyword from construction.
+ upper: bool, optional
+ If True, force all column names to upper case in output. Will
+ over-ride the upper= keyword from construction.
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Will over-ride the
+ trim_strings= keyword from constructor.
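+
+ For example, a sketch of keyword usage; a sequence of columns
+ returns a recarray with those fields:
+
+ >>> data = fits[ext].read(columns=['x', 'y'], rows=[2, 5, 11])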
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if columns is not None:
+ data = self.read_columns(
+ columns, rows=rows, vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings)
+ elif rows is not None:
+ # only rows were requested; read_rows reads all columns. Row and
+ # column combinations are handled above by read_columns
+ data = self.read_rows(
+ rows, vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings)
+ else:
+ data = self._read_all(
+ vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings)
+
+ return data
+
+ def _read_all(self, vstorage=None,
+ upper=False, lower=False, trim_strings=False, colnums=None,
+ **keys):
+ """
+ Read all data in the HDU.
+
+ parameters
+ ----------
+ vstorage: string, optional
+ Over-ride the default method to store variable length columns. Can
+ be 'fixed' or 'object'. See docs on fitsio.FITS for details.
+ lower: bool, optional
+ If True, force all column names to lower case in output. Will
+ over-ride the lower= keyword from construction.
+ upper: bool, optional
+ If True, force all column names to upper case in output. Will
+ over-ride the upper= keyword from construction.
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Will over-ride the
+ trim_strings= keyword from constructor.
+ colnums: integer array, optional
+ The column numbers, 0 offset
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ dtype, offsets, isvar = self.get_rec_dtype(
+ colnums=colnums, vstorage=vstorage)
+
+ w, = np.where(isvar == True) # noqa
+ has_tbit = self._check_tbit()
+
+ if w.size > 0:
+ if vstorage is None:
+ _vstorage = self._vstorage
+ else:
+ _vstorage = vstorage
+ colnums = self._extract_colnums()
+ rows = None
+ sortind = None
+ array = self._read_rec_with_var(
+ colnums, rows, sortind, dtype,
+ offsets, isvar, _vstorage,
+ )
+ elif has_tbit:
+ # drop down to read_columns since we can't stuff into a
+ # contiguous array
+ colnums = self._extract_colnums()
+ array = self.read_columns(
+ colnums,
+ rows=None, vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings)
+ else:
+ nrows = self._info['nrows']
+ array = np.zeros(nrows, dtype=dtype)
+
+ self._FITS.read_as_rec(self._ext+1, 1, nrows, array)
+
+ array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+ for colnum, name in enumerate(array.dtype.names):
+ self._rescale_and_convert_field_inplace(
+ array,
+ name,
+ self._info['colinfo'][colnum]['tscale'],
+ self._info['colinfo'][colnum]['tzero'])
+
+ if self.lower or lower:
+ _names_to_lower_if_recarray(array)
+ elif self.upper or upper:
+ _names_to_upper_if_recarray(array)
+
+ self._maybe_trim_strings(array, trim_strings=trim_strings)
+ return array
+
+ def read_column(self, col, rows=None, vstorage=None,
+ upper=False, lower=False, trim_strings=False, **keys):
+ """
+ Read the specified column
+
+ Alternatively, you can use slice notation
+
+ >>> fits=fitsio.FITS(filename)
+ >>> fits[ext][colname][:]
+ >>> fits[ext][colname][2:5]
+ >>> fits[ext][colname][200:235:2]
+ >>> fits[ext][colname][rows]
+
+ Note, if reading multiple columns, it is more efficient to use
+ read(columns=) or slice notation with a list of column names.
+
+ parameters
+ ----------
+ col: string/int, required
+ The column name or number.
+ rows: optional
+ An optional set of row numbers to read.
+ vstorage: string, optional
+ Over-ride the default method to store variable length columns. Can
+ be 'fixed' or 'object'. See docs on fitsio.FITS for details.
+ lower: bool, optional
+ If True, force all column names to lower case in output. Will
+ over-ride the lower= keyword from construction.
+ upper: bool, optional
+ If True, force all column names to upper case in output. Will
+ over-ride the upper= keyword from construction.
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Will over-ride the
+ trim_strings= keyword from constructor.
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ res = self.read_columns(
+ [col], rows=rows, vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings)
+ colname = res.dtype.names[0]
+ data = res[colname]
+
+ self._maybe_trim_strings(data, trim_strings=trim_strings)
+ return data
+
+ def read_rows(self, rows, vstorage=None,
+ upper=False, lower=False, trim_strings=False, **keys):
+ """
+ Read the specified rows.
+
+ parameters
+ ----------
+ rows: list,array
+ A list or array of row indices.
+ vstorage: string, optional
+ Over-ride the default method to store variable length columns. Can
+ be 'fixed' or 'object'. See docs on fitsio.FITS for details.
+ lower: bool, optional
+ If True, force all column names to lower case in output. Will
+ over-ride the lower= keyword from construction.
+ upper: bool, optional
+ If True, force all column names to upper case in output. Will
+ over-ride the upper= keyword from construction.
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Will over-ride the
+ trim_strings= keyword from constructor.
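+
+ For example, a sketch reading three rows into a recarray:
+
+ >>> data = fits[ext].read_rows([2, 5, 11])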
+ """
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if rows is None:
+ # we actually want all rows!
+ return self._read_all()
+
+ if self._info['hdutype'] == ASCII_TBL:
+ return self.read(
+ rows=rows, vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings)
+
+ rows, sortind = self._extract_rows(rows)
+ dtype, offsets, isvar = self.get_rec_dtype(vstorage=vstorage)
+
+ w, = np.where(isvar == True) # noqa
+ has_tbit = self._check_tbit()
+
+ if w.size > 0:
+ if vstorage is None:
+ _vstorage = self._vstorage
+ else:
+ _vstorage = vstorage
+ colnums = self._extract_colnums()
+ return self._read_rec_with_var(
+ colnums, rows, sortind, dtype, offsets, isvar, _vstorage,
+ )
+ elif has_tbit:
+ # drop down to read_columns since we can't stuff into a
+ # contiguous array
+ colnums = self._extract_colnums()
+ array = self.read_columns(
+ colnums, rows=rows, vstorage=vstorage, upper=upper,
+ lower=lower, trim_strings=trim_strings,
+ )
+ else:
+ array = np.zeros(rows.size, dtype=dtype)
+ self._FITS.read_rows_as_rec(self._ext+1, array, rows, sortind)
+
+ array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+ for colnum, name in enumerate(array.dtype.names):
+ self._rescale_and_convert_field_inplace(
+ array,
+ name,
+ self._info['colinfo'][colnum]['tscale'],
+ self._info['colinfo'][colnum]['tzero'])
+
+ if self.lower or lower:
+ _names_to_lower_if_recarray(array)
+ elif self.upper or upper:
+ _names_to_upper_if_recarray(array)
+
+ self._maybe_trim_strings(array, trim_strings=trim_strings)
+
+ return array
+
+ def read_columns(self, columns, rows=None, vstorage=None,
+ upper=False, lower=False, trim_strings=False, **keys):
+ """
+ read a subset of columns from this binary table HDU
+
+ By default, all rows are read. Send rows= to select subsets of the
+ data. Table data are read into a recarray for multiple columns,
+ plain array for a single column.
+
+ parameters
+ ----------
+ columns: list/array
+ An optional set of columns to read from table HDUs. Can be string
+ or number. If a sequence, a recarray is always returned. If a
+ scalar, an ordinary array is returned.
+ rows: list/array, optional
+ An optional list of rows to read from table HDUs. Default is to
+ read all.
+ vstorage: string, optional
+ Over-ride the default method to store variable length columns. Can
+ be 'fixed' or 'object'. See docs on fitsio.FITS for details.
+ lower: bool, optional
+ If True, force all column names to lower case in output. Will
+ over-ride the lower= keyword from construction.
+ upper: bool, optional
+ If True, force all column names to upper case in output. Will
+ over-ride the upper= keyword from construction.
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Will over-ride the
+ trim_strings= keyword from constructor.
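+
+ For example, a sketch; the result is a recarray with fields 'x'
+ and 'y':
+
+ >>> data = fits[ext].read_columns(['x', 'y'], rows=[1, 3])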
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if self._info['hdutype'] == ASCII_TBL:
+ return self.read(
+ columns=columns, rows=rows, vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings)
+
+ # if columns is None, returns all. Guaranteed to be unique and sorted
+ colnums = self._extract_colnums(columns)
+ if isinstance(colnums, int):
+ # scalar sent, don't read as a recarray
+ return self.read_column(
+ columns,
+ rows=rows, vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings,
+ )
+
+ # if rows is None still returns None, and is correctly interpreted
+ # by the reader to mean all
+ rows, sortind = self._extract_rows(rows)
+
+ # this is the full dtype for all columns
+ dtype, offsets, isvar = self.get_rec_dtype(
+ colnums=colnums, vstorage=vstorage)
+
+ w, = np.where(isvar == True) # noqa
+ if w.size > 0:
+ if vstorage is None:
+ _vstorage = self._vstorage
+ else:
+ _vstorage = vstorage
+ array = self._read_rec_with_var(
+ colnums, rows, sortind, dtype, offsets, isvar, _vstorage,
+ )
+ else:
+
+ if rows is None:
+ nrows = self._info['nrows']
+ else:
+ nrows = rows.size
+
+ array = np.zeros(nrows, dtype=dtype)
+
+ # the C code wants 1-offset column numbers
+ colnumsp = colnums + 1
+ self._FITS.read_columns_as_rec(
+ self._ext+1, colnumsp, array, rows, sortind
+ )
+
+ array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+ for i in xrange(colnums.size):
+ colnum = int(colnums[i])
+ name = array.dtype.names[i]
+ self._rescale_and_convert_field_inplace(
+ array,
+ name,
+ self._info['colinfo'][colnum]['tscale'],
+ self._info['colinfo'][colnum]['tzero'])
+
+ if (self._check_tbit(colnums=colnums)):
+ array = self._fix_tbit_dtype(array, colnums)
+
+ if self.lower or lower:
+ _names_to_lower_if_recarray(array)
+ elif self.upper or upper:
+ _names_to_upper_if_recarray(array)
+
+ self._maybe_trim_strings(array, trim_strings=trim_strings)
+
+ return array
+
+ def read_slice(self, firstrow, lastrow, step=1,
+ vstorage=None, lower=False, upper=False,
+ trim_strings=False, **keys):
+ """
+ Read the specified row slice from a table.
+
+ Read all rows between firstrow and lastrow (non-inclusive, as per
+ python slice notation). Note you must use slice notation for
+ images, e.g. f[ext][20:30, 40:50]
+
+ parameters
+ ----------
+ firstrow: integer
+ The first row to read
+ lastrow: integer
+ The last row to read, non-inclusive. This follows the python list
+ slice convention that one does not include the last element.
+ step: integer, optional
+ Step between rows, default 1. e.g., if step is 2, skip every other
+ row.
+ vstorage: string, optional
+ Over-ride the default method to store variable length columns. Can
+ be 'fixed' or 'object'. See docs on fitsio.FITS for details.
+ lower: bool, optional
+ If True, force all column names to lower case in output. Will
+ over-ride the lower= keyword from construction.
+ upper: bool, optional
+ If True, force all column names to upper case in output. Will
+ over-ride the upper= keyword from construction.
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Will over-ride the
+ trim_strings= keyword from constructor.
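+
+ For example, a sketch reading rows 10 through 19, equivalent to
+ f[ext][10:20]:
+
+ >>> data = fits[ext].read_slice(10, 20)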
+ """
+
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if self._info['hdutype'] == ASCII_TBL:
+ rows = np.arange(firstrow, lastrow, step, dtype='i8')
+ return self.read_ascii(
+ rows=rows, vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings)
+
+ if self._info['hdutype'] == IMAGE_HDU:
+ raise ValueError("slices currently only supported for tables")
+
+ maxrow = self._info['nrows']
+ if firstrow < 0 or lastrow > maxrow:
+ raise ValueError(
+ "slice must specify a sub-range of [%d,%d]" % (0, maxrow))
+
+ dtype, offsets, isvar = self.get_rec_dtype(vstorage=vstorage)
+
+ w, = np.where(isvar == True) # noqa
+ has_tbit = self._check_tbit()
+
+ if w.size > 0:
+ if vstorage is None:
+ _vstorage = self._vstorage
+ else:
+ _vstorage = vstorage
+ rows = np.arange(firstrow, lastrow, step, dtype='i8')
+ sortind = np.arange(rows.size, dtype='i8')
+ colnums = self._extract_colnums()
+ array = self._read_rec_with_var(
+ colnums, rows, sortind, dtype, offsets, isvar, _vstorage)
+ elif has_tbit:
+ # drop down to read_columns since we can't stuff into a
+ # contiguous array
+ colnums = self._extract_colnums()
+ rows = np.arange(firstrow, lastrow, step, dtype='i8')
+ array = self.read_columns(
+ colnums, rows=rows, vstorage=vstorage, upper=upper,
+ lower=lower, trim_strings=trim_strings,
+ )
+ else:
+ if step != 1:
+ rows = np.arange(firstrow, lastrow, step, dtype='i8')
+ array = self.read(rows=rows)
+ else:
+ # no +1 because lastrow is non-inclusive
+ nrows = lastrow - firstrow
+ array = np.zeros(nrows, dtype=dtype)
+
+ # only the first needs to be +1, because the C code treats
+ # lastrow as inclusive
+ self._FITS.read_as_rec(self._ext+1, firstrow+1, lastrow, array)
+
+ array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
+ array)
+
+ for colnum, name in enumerate(array.dtype.names):
+ self._rescale_and_convert_field_inplace(
+ array,
+ name,
+ self._info['colinfo'][colnum]['tscale'],
+ self._info['colinfo'][colnum]['tzero'])
+
+ if self.lower or lower:
+ _names_to_lower_if_recarray(array)
+ elif self.upper or upper:
+ _names_to_upper_if_recarray(array)
+
+ self._maybe_trim_strings(array, trim_strings=trim_strings)
+
+ return array
+
+ def get_rec_dtype(self, colnums=None, vstorage=None, **keys):
+ """
+ Get the dtype for the specified columns
+
+ parameters
+ ----------
+ colnums: integer array, optional
+ The column numbers, 0 offset
+ vstorage: string, optional
+ See docs in read_columns
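+
+ For example, a sketch getting the dtype for the first two columns:
+
+ >>> dtype, offsets, isvar = hdu.get_rec_dtype(colnums=[0, 1])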
+ """
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if vstorage is None:
+ _vstorage = self._vstorage
+ else:
+ _vstorage = vstorage
+
+ if colnums is None:
+ colnums = self._extract_colnums()
+
+ descr = []
+ isvararray = np.zeros(len(colnums), dtype=bool)
+ for i, colnum in enumerate(colnums):
+ dt, isvar = self.get_rec_column_descr(colnum, _vstorage)
+ descr.append(dt)
+ isvararray[i] = isvar
+ dtype = np.dtype(descr)
+
+ offsets = np.zeros(len(colnums), dtype='i8')
+ for i, n in enumerate(dtype.names):
+ offsets[i] = dtype.fields[n][1]
+ return dtype, offsets, isvararray
+
+ def _check_tbit(self, colnums=None, **keys):
+ """
+ Check if one of the columns is a TBIT column
+
+ parameters
+ ----------
+ colnums: integer array, optional
+ """
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if colnums is None:
+ colnums = self._extract_colnums()
+
+ has_tbit = False
+ for i, colnum in enumerate(colnums):
+ npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+ if (istbit):
+ has_tbit = True
+ break
+
+ return has_tbit
+
+ def _fix_tbit_dtype(self, array, colnums):
+ """
+ If necessary, patch up the TBIT to convert to bool array
+
+ parameters
+ ----------
+ array: record array
+ colnums: column numbers for lookup
+ """
+ descr = array.dtype.descr
+ for i, colnum in enumerate(colnums):
+ npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+ if (istbit):
+ coldescr = list(descr[i])
+ coldescr[1] = '?'
+ descr[i] = tuple(coldescr)
+
+ return array.view(descr)
+
+ def _get_simple_dtype_and_shape(self, colnum, rows=None):
+ """
+ When reading a single column, we want the basic data
+ type and the shape of the array.
+
+ for scalar columns, shape is just nrows, otherwise
+ it is (nrows, dim1, dim2)
+
+ Note if rows= is sent and only a single row is requested,
+ the shape will be just (dim1, dim2), with no leading nrows
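+
+ For example, a scalar 'f8' column read over 10 rows gives shape
+ 10, while a length-3 vector column gives shape (10, 3)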
+ """
+
+ # basic datatype
+ npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+ info = self._info['colinfo'][colnum]
+ name = info['name']
+
+ if rows is None:
+ nrows = self._info['nrows']
+ else:
+ nrows = rows.size
+
+ shape = None
+ tdim = info['tdim']
+
+ shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
+ if shape is not None:
+ if nrows > 1:
+ if not isinstance(shape, tuple):
+ # vector
+ shape = (nrows, shape)
+ else:
+ # multi-dimensional
+ shape = tuple([nrows] + list(shape))
+ else:
+ # scalar
+ shape = nrows
+ return npy_type, shape
+
+ def get_rec_column_descr(self, colnum, vstorage):
+ """
+ Get a descriptor entry for the specified column.
+
+ parameters
+ ----------
+ colnum: integer
+ The column number, 0 offset
+ vstorage: string
+ See docs in read_columns
+ """
+ npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
+ name = self._info['colinfo'][colnum]['name']
+
+ if isvar:
+ if vstorage == 'object':
+ descr = (name, 'O')
+ else:
+ tform = self._info['colinfo'][colnum]['tform']
+ max_size = _extract_vararray_max(tform)
+
+ if max_size <= 0:
+ name = self._info['colinfo'][colnum]['name']
+ note = 'Will read as an object field'
+ if max_size < 0:
+ mess = "Column '%s': No maximum size: '%s'. %s"
+ mess = mess % (name, tform, note)
+ warnings.warn(mess, FITSRuntimeWarning)
+ else:
+ mess = "Column '%s': Max size is zero: '%s'. %s"
+ mess = mess % (name, tform, note)
+ warnings.warn(mess, FITSRuntimeWarning)
+
+ # we are forced to read this as an object array
+ return self.get_rec_column_descr(colnum, 'object')
+
+ if npy_type[0] == 'S':
+ # variable length string columns cannot
+ # themselves be arrays I don't think
+ npy_type = 'S%d' % max_size
+ descr = (name, npy_type)
+ elif npy_type[0] == 'U':
+ # variable length string columns cannot
+ # themselves be arrays I don't think
+ npy_type = 'U%d' % max_size
+ descr = (name, npy_type)
+ else:
+ descr = (name, npy_type, max_size)
+ else:
+ tdim = self._info['colinfo'][colnum]['tdim']
+ shape = _tdim2shape(
+ tdim, name,
+ is_string=(npy_type[0] == 'S' or npy_type[0] == 'U'))
+ if shape is not None:
+ descr = (name, npy_type, shape)
+ else:
+ descr = (name, npy_type)
+ return descr, isvar
+
+ def _read_rec_with_var(
+ self, colnums, rows, sortind, dtype, offsets, isvar, vstorage):
+ """
+ Read columns from a table into a rec array, including variable length
+ columns. This is special because, for efficiency, it reads from the
+ main table as normal while skipping the variable-length columns, then
+ reads the variable length columns with appropriate accounting for
+ strides.
+
+ row and column numbers should be checked before calling this function
+ """
+
+ colnumsp = colnums+1
+ if rows is None:
+ nrows = self._info['nrows']
+ else:
+ nrows = rows.size
+ array = np.zeros(nrows, dtype=dtype)
+
+ # read from the main table first
+ wnotvar, = np.where(isvar == False) # noqa
+ if wnotvar.size > 0:
+ # this will be contiguous (not true for slices)
+ thesecol = colnumsp[wnotvar]
+ theseoff = offsets[wnotvar]
+ self._FITS.read_columns_as_rec_byoffset(
+ self._ext+1,
+ thesecol,
+ theseoff,
+ array,
+ rows,
+ sortind,
+ )
+ for i in xrange(thesecol.size):
+
+ name = array.dtype.names[wnotvar[i]]
+ colnum = thesecol[i]-1
+ self._rescale_and_convert_field_inplace(
+ array,
+ name,
+ self._info['colinfo'][colnum]['tscale'],
+ self._info['colinfo'][colnum]['tzero'],
+ )
+
+ array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+ # now read the variable length arrays. We may be able to speed this
+ # up by storing directly instead of reading into a list first
+ wvar, = np.where(isvar == True) # noqa
+ if wvar.size > 0:
+ # this will be contiguous (not true for slices)
+ thesecol = colnumsp[wvar]
+ for i in xrange(thesecol.size):
+ colnump = thesecol[i]
+ name = array.dtype.names[wvar[i]]
+ dlist = self._FITS.read_var_column_as_list(
+ self._ext+1, colnump, rows, sortind,
+ )
+
+ is_string = (isinstance(dlist[0], str) or
+ (IS_PY3 and isinstance(dlist[0], bytes)))
+
+ if array[name].dtype.descr[0][1][1] == 'O':
+ # storing in object array
+ # get references to each, no copy made
+ for irow, item in enumerate(dlist):
+ if sortind is not None:
+ irow = sortind[irow]
+ if IS_PY3 and isinstance(item, bytes):
+ item = item.decode('ascii')
+ array[name][irow] = item
+ else:
+ for irow, item in enumerate(dlist):
+ if sortind is not None:
+ irow = sortind[irow]
+ if IS_PY3 and isinstance(item, bytes):
+ item = item.decode('ascii')
+
+ if is_string:
+ array[name][irow] = item
+ else:
+ ncopy = len(item)
+
+ if IS_PY3:
+ ts = array[name].dtype.descr[0][1][1]
+ if ts != 'S' and ts != 'U':
+ array[name][irow][0:ncopy] = item[:]
+ else:
+ array[name][irow] = item
+ else:
+ array[name][irow][0:ncopy] = item[:]
+
+ return array
+
+ def _extract_rows(self, rows, sort=False):
+ """
+ Extract an array of rows from an input scalar or sequence
+ """
+ if rows is not None:
+ rows = np.array(rows, ndmin=1, copy=copy_if_needed, dtype='i8')
+ if sort:
+ rows = np.unique(rows)
+ return rows, None
+
+ # returns unique, sorted. Force i8 for 32-bit systems
+ sortind = np.array(rows.argsort(), dtype='i8', copy=copy_if_needed)
+
+ maxrow = self._info['nrows']-1
+ if rows.size > 0:
+ firstrow = rows[sortind[0]]
+ lastrow = rows[sortind[-1]]
+
+ if firstrow < 0 or lastrow > maxrow:
+ raise ValueError("rows must be in [%d,%d]" % (0, maxrow))
+ else:
+ sortind = None
+
+ return rows, sortind
+
+ def _process_slice(self, arg):
+ """
+ process the input slice for use calling the C code
+ """
+ start = arg.start
+ stop = arg.stop
+ step = arg.step
+
+ nrows = self._info['nrows']
+ if step is None:
+ step = 1
+ if start is None:
+ start = 0
+ if stop is None:
+ stop = nrows
+
+ if start < 0:
+ start = nrows + start
+ if start < 0:
+ raise IndexError("Index out of bounds")
+
+ if stop < 0:
+ # negative stop counts back from the end, per python slice
+ # semantics
+ stop = nrows + stop
+
+ if stop < start:
+ # will return an empty struct
+ stop = start
+
+ if stop > nrows:
+ stop = nrows
+ return slice(start, stop, step)
+
+ def _slice2rows(self, start, stop, step=None):
+ """
+ Convert a slice to an explicit array of rows
+ """
+ nrows = self._info['nrows']
+ if start is None:
+ start = 0
+ if stop is None:
+ stop = nrows
+ if step is None:
+ step = 1
+
+ tstart = self._fix_range(start)
+ tstop = self._fix_range(stop)
+ if tstart == 0 and tstop == nrows and step == 1:
+ # this is faster: if all fields are also requested, then a
+ # single fread will be done
+ return None
+ if stop < start:
+ raise ValueError("start is greater than stop in slice")
+ return np.arange(tstart, tstop, step, dtype='i8')
+
+ def _fix_range(self, num, isslice=True):
+ """
+ Ensure the input is within range.
+
+ If isslice=True, treat num as a slice endpoint, which may equal
+ nrows; otherwise clip it to a valid single element index
+ """
+
+ nrows = self._info['nrows']
+ if isslice:
+ # include the end
+ if num < 0:
+ num = nrows + (1+num)
+ elif num > nrows:
+ num = nrows
+ else:
+ # single element
+ if num < 0:
+ num = nrows + num
+ elif num > (nrows-1):
+ num = nrows-1
+
+ return num
+
+ def _rescale_and_convert_field_inplace(self, array, name, scale, zero):
+ """
+ Apply fits scalings. Also, convert bool to proper
+ numpy boolean values
+ """
+ self._rescale_array(array[name], scale, zero)
+ if array[name].dtype == bool:
+ array[name] = self._convert_bool_array(array[name])
+ return array
+
+ def _rescale_and_convert(self, array, scale, zero, name=None):
+ """
+ Apply fits scalings. Also, convert bool to proper
+ numpy boolean values
+ """
+ self._rescale_array(array, scale, zero)
+ if array.dtype == bool:
+ array = self._convert_bool_array(array)
+
+ return array
+
+ def _rescale_array(self, array, scale, zero):
+ """
+ Scale the input array
+ """
+ if scale != 1.0:
+ sval = np.array(scale, dtype=array.dtype)
+ array *= sval
+ if zero != 0.0:
+ zval = np.array(zero, dtype=array.dtype)
+ array += zval
+
+ def _maybe_trim_strings(self, array, trim_strings=False, **keys):
+ """
+ if requested, trim trailing white space from
+ all string fields in the input array
+ """
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if self.trim_strings or trim_strings:
+ _trim_strings(array)
+
+ def _maybe_decode_fits_ascii_strings_to_unicode_py3(self, array):
+ if IS_PY3:
+ do_conversion = False
+ new_dt = []
+ for _dt in array.dtype.descr:
+ if 'S' in _dt[1]:
+ do_conversion = True
+ if len(_dt) == 3:
+ new_dt.append((
+ _dt[0],
+ _dt[1].replace('S', 'U').replace('|', ''),
+ _dt[2]))
+ else:
+ new_dt.append((
+ _dt[0],
+ _dt[1].replace('S', 'U').replace('|', '')))
+ else:
+ new_dt.append(_dt)
+ if do_conversion:
+ array = array.astype(new_dt, copy=copy_if_needed)
+ return array
+
+ def _convert_bool_array(self, array):
+ """
+ cfitsio reads as characters 'T' and 'F' -- convert to real boolean
+ If input is a fits bool, convert to numpy boolean
+ """
+
+ output = (array.view(np.int8) == ord('T')).astype(bool)
+ return output
+
+ def _get_tbl_numpy_dtype(self, colnum, include_endianness=True):
+ """
+ Get numpy type for the input column
+ """
+ table_type = self._info['hdutype']
+ table_type_string = _hdu_type_map[table_type]
+ try:
+ ftype = self._info['colinfo'][colnum]['eqtype']
+ if table_type == ASCII_TBL:
+ npy_type = _table_fits2npy_ascii[abs(ftype)]
+ else:
+ npy_type = _table_fits2npy[abs(ftype)]
+ except KeyError:
+ raise KeyError("unsupported %s fits data "
+ "type: %d" % (table_type_string, ftype))
+
+ istbit = False
+ if (ftype == 1):
+ istbit = True
+
+ isvar = False
+ if ftype < 0:
+ isvar = True
+ if include_endianness:
+ # if binary we will read the big endian bytes directly,
+ # if ascii we read into native byte order
+ if table_type == ASCII_TBL:
+ addstr = ''
+ else:
+ addstr = '>'
+ if npy_type not in ['u1', 'i1', 'S', 'U']:
+ npy_type = addstr+npy_type
+
+ if npy_type == 'S':
+ width = self._info['colinfo'][colnum]['width']
+ npy_type = 'S%d' % width
+ elif npy_type == 'U':
+ width = self._info['colinfo'][colnum]['width']
+ npy_type = 'U%d' % width
+
+ return npy_type, isvar, istbit
+
+ def _process_args_as_rows_or_columns(self, arg, unpack=False):
+ """
+ We must be able to interpret the args as as either a column name or
+ row number, or sequences thereof. Numpy arrays and slices are also
+ fine.
+
+ Examples:
+ 'field'
+ 35
+ [35,55,86]
+ ['f1','f2',...]
+ Can also be tuples or arrays.
+ """
+
+ flags = set()
+
+ if isinstance(arg, (tuple, list, np.ndarray)):
+ # a sequence was entered: strings mean column names,
+ # anything else means rows
+ result = arg
+ if not isstring(arg[0]):
+ flags.add('isrows')
+ elif isstring(arg):
+ # a single string was entered
+ result = arg
+ elif isinstance(arg, slice):
+ if unpack:
+ flags.add('isrows')
+ result = self._slice2rows(arg.start, arg.stop, arg.step)
+ else:
+ flags.add('isrows')
+ flags.add('isslice')
+ result = self._process_slice(arg)
+ else:
+ # a single object was entered.
+ # Probably should apply some more checking on this
+ result = arg
+ flags.add('isrows')
+ if np.ndim(arg) == 0:
+ flags.add('isscalar')
+
+ return result, flags
+
+ def _read_var_column(self, colnum, rows, sortind, vstorage):
+ """
+ First read as a list of arrays, then copy into either a fixed-length
+ array or an array of objects, depending on vstorage.
+ """
+
+ if IS_PY3:
+ stype = bytes
+ else:
+ stype = str
+
+ dlist = self._FITS.read_var_column_as_list(
+ self._ext+1, colnum+1, rows, sortind,
+ )
+
+ if vstorage == 'fixed':
+ tform = self._info['colinfo'][colnum]['tform']
+ max_size = _extract_vararray_max(tform)
+
+ if max_size <= 0:
+ name = self._info['colinfo'][colnum]['name']
+ note = 'Will read as an object field'
+ if max_size < 0:
+ mess = "Column '%s': No maximum size: '%s'. %s"
+ mess = mess % (name, tform, note)
+ warnings.warn(mess, FITSRuntimeWarning)
+ else:
+ mess = "Column '%s': Max size is zero: '%s'. %s"
+ mess = mess % (name, tform, note)
+ warnings.warn(mess, FITSRuntimeWarning)
+
+ # we are forced to read this as an object array
+ return self._read_var_column(colnum, rows, sortind, 'object')
+
+ if isinstance(dlist[0], stype):
+ descr = 'S%d' % max_size
+ array = np.fromiter(dlist, descr)
+ if IS_PY3:
+ array = array.astype('U', copy=copy_if_needed)
+ else:
+ descr = dlist[0].dtype.str
+ array = np.zeros((len(dlist), max_size), dtype=descr)
+
+ for irow, item in enumerate(dlist):
+ if sortind is not None:
+ irow = sortind[irow]
+ ncopy = len(item)
+ array[irow, 0:ncopy] = item[:]
+ else:
+ array = np.zeros(len(dlist), dtype='O')
+ for irow, item in enumerate(dlist):
+ if sortind is not None:
+ irow = sortind[irow]
+
+ if IS_PY3 and isinstance(item, bytes):
+ item = item.decode('ascii')
+ array[irow] = item
+
+ return array
+
+ def _extract_colnums(self, columns=None):
+ """
+ Extract an array of columns from the input
+ """
+ if columns is None:
+ return np.arange(self._ncol, dtype='i8')
+
+ if not isinstance(columns, (tuple, list, np.ndarray)):
+ # is a scalar
+ return self._extract_colnum(columns)
+
+ colnums = np.zeros(len(columns), dtype='i8')
+ for i in xrange(colnums.size):
+ colnums[i] = self._extract_colnum(columns[i])
+
+ # returns unique sorted
+ colnums = np.unique(colnums)
+ return colnums
+
+ def _extract_colnum(self, col):
+ """
+ Get the column number for the input column
+ """
+ if isinteger(col):
+ colnum = col
+
+ if (colnum < 0) or (colnum > (self._ncol-1)):
+ raise ValueError(
+ "column number should be in [0,%d]" % (self._ncol-1)
+ )
+ else:
+ colstr = mks(col)
+ try:
+ if self.case_sensitive:
+ mess = "column name '%s' not found (case sensitive)" % col
+ colnum = self._colnames.index(colstr)
+ else:
+ mess = ("column name '%s' not found "
+ "(case insensitive)" % col)
+ colnum = self._colnames_lower.index(colstr.lower())
+ except ValueError:
+ raise ValueError(mess)
+ return int(colnum)
+
+ def _update_info(self):
+ """
+ Call parent method and make sure this is in fact a
+ table HDU. Set some convenience data.
+ """
+ super(TableHDU, self)._update_info()
+ if self._info['hdutype'] == IMAGE_HDU:
+ mess = "Extension %s is not a Table HDU" % self.ext
+ raise ValueError(mess)
+ if 'colinfo' in self._info:
+ self._colnames = [i['name'] for i in self._info['colinfo']]
+ self._colnames_lower = [
+ i['name'].lower() for i in self._info['colinfo']]
+ self._ncol = len(self._colnames)
+
+ def __getitem__(self, arg):
+ """
+ Get data from a table using python [] notation.
+
+ You can use [] to extract column and row subsets, or read everything.
+ The notation is essentially the same as numpy [] notation, except that
+ a sequence of column names may also be given. Examples reading from
+ "filename", extension "ext"
+
+ fits=fitsio.FITS(filename)
+ fits[ext][:]
+ fits[ext][2] # returns a scalar
+ fits[ext][2:5]
+ fits[ext][200:235:2]
+ fits[ext][rows]
+ fits[ext][cols][rows]
+
+ Note data are only read once the rows are specified.
+
+ Note variable length arrays are always read using the default
+ vstorage with this notation, so set it as you want on
+ construction.
+
+ This function is used for ascii tables as well
+ """
+
+ res, flags = \
+ self._process_args_as_rows_or_columns(arg)
+
+ if 'isrows' in flags:
+ # rows were entered: read all columns
+ if 'isslice' in flags:
+ array = self.read_slice(res.start, res.stop, res.step)
+ else:
+ # will also get here if slice is entered but this
+ # is an ascii table
+ array = self.read(rows=res)
+ else:
+ return TableColumnSubset(self, res)
+
+ if self.lower:
+ _names_to_lower_if_recarray(array)
+ elif self.upper:
+ _names_to_upper_if_recarray(array)
+
+ self._maybe_trim_strings(array)
+
+ if 'isscalar' in flags:
+ assert array.shape[0] == 1
+ array = array[0]
+ return array
+
+ def __iter__(self):
+ """
+ Get an iterator for a table
+
+ e.g.
+ f=fitsio.FITS(fname)
+ hdu1 = f[1]
+ for row in hdu1:
+ ...
+ """
+
+ # always start with first row
+ self._iter_row = 0
+
+ # for iterating we must assume the number of rows will not change
+ self._iter_nrows = self.get_nrows()
+
+ self._buffer_iter_rows(0)
+ return self
+
+ def next(self):
+ """
+ get the next row when iterating
+
+ e.g.
+ f=fitsio.FITS(fname)
+ hdu1 = f[1]
+ for row in hdu1:
+ ...
+
+ By default read one row at a time. Send iter_row_buffer to get a more
+ efficient buffering.
+ """
+ return self._get_next_buffered_row()
+
+ __next__ = next
+
+ def _get_next_buffered_row(self):
+ """
+ Get the next row for iteration.
+ """
+ if self._iter_row == self._iter_nrows:
+ raise StopIteration
+
+ if self._row_buffer_index >= self._iter_row_buffer:
+ self._buffer_iter_rows(self._iter_row)
+
+ data = self._row_buffer[self._row_buffer_index]
+ self._iter_row += 1
+ self._row_buffer_index += 1
+ return data
+
+ def _buffer_iter_rows(self, start):
+ """
+ Read in the buffer for iteration
+ """
+ self._row_buffer = self[start:start+self._iter_row_buffer]
+
+ # start back at the front of the buffer
+ self._row_buffer_index = 0
+
+ def __repr__(self):
+ """
+ textual representation for some metadata
+ """
+ text, spacing = self._get_repr_list()
+
+ text.append('%srows: %d' % (spacing, self._info['nrows']))
+ text.append('%scolumn info:' % spacing)
+
+ cspacing = ' '*4
+ nspace = 4
+ nname = 15
+ ntype = 6
+ format = cspacing + "%-" + str(nname) + "s %" + str(ntype) + "s %s"
+ pformat = (
+ cspacing + "%-" +
+ str(nname) + "s\n %" +
+ str(nspace+nname+ntype) + "s %s")
+
+ for colnum, c in enumerate(self._info['colinfo']):
+ if len(c['name']) > nname:
+ f = pformat
+ else:
+ f = format
+
+ dt, isvar, istbit = self._get_tbl_numpy_dtype(
+ colnum, include_endianness=False)
+ if isvar:
+ tform = self._info['colinfo'][colnum]['tform']
+ if dt[0] == 'S':
+ dt = 'S0'
+ dimstr = 'vstring[%d]' % _extract_vararray_max(tform)
+ else:
+ dimstr = 'varray[%s]' % _extract_vararray_max(tform)
+ else:
+ if dt[0] == 'S':
+ is_string = True
+ else:
+ is_string = False
+ dimstr = _get_col_dimstr(c['tdim'], is_string=is_string)
+
+ s = f % (c['name'], dt, dimstr)
+ text.append(s)
+
+ text = '\n'.join(text)
+ return text
+
+
+class AsciiTableHDU(TableHDU):
+ def read(self, rows=None, columns=None, vstorage=None,
+ upper=False, lower=False, trim_strings=False, **keys):
+ """
+ read data from an ascii table HDU
+
+ By default, all rows are read. Send rows= to select subsets of the
+ data. Table data are read into a recarray for multiple columns,
+ plain array for a single column.
+
+ parameters
+ ----------
+ columns: list/array
+ An optional set of columns to read from table HDUs. Can be string
+ or number. If a sequence, a recarray is always returned. If a
+ scalar, an ordinary array is returned.
+ rows: list/array, optional
+ An optional list of rows to read from table HDUs. Default is to
+ read all.
+ vstorage: string, optional
+ Over-ride the default method to store variable length columns. Can
+ be 'fixed' or 'object'. See docs on fitsio.FITS for details.
+ lower: bool, optional
+ If True, force all column names to lower case in output. Will
+ over-ride the lower= keyword from construction.
+ upper: bool, optional
+ If True, force all column names to upper case in output. Will
+ over-ride the upper= keyword from construction.
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Will over-ride the
+ trim_strings= keyword from constructor.
+ """
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ # if columns is None, returns all. Guaranteed to be unique and sorted
+ colnums = self._extract_colnums(columns)
+ if isinstance(colnums, int):
+ # scalar sent, don't read as a recarray
+ return self.read_column(
+ columns, rows=rows, vstorage=vstorage,
+ upper=upper, lower=lower, trim_strings=trim_strings)
+
+ # if rows is None this returns None, which is correctly interpreted
+ # by the reader to mean all
+ rows, sortind = self._extract_rows(rows)
+ if rows is None:
+ nrows = self._info['nrows']
+ else:
+ nrows = rows.size
+
+ # this is the full dtype for all columns
+ dtype, offsets, isvar = self.get_rec_dtype(
+ colnums=colnums, vstorage=vstorage)
+ array = np.zeros(nrows, dtype=dtype)
+
+ # note reading into existing data
+ wnotvar, = np.where(isvar == False) # noqa
+ if wnotvar.size > 0:
+ for i in wnotvar:
+ colnum = colnums[i]
+ name = array.dtype.names[i]
+ a = array[name].copy()
+ self._FITS.read_column(self._ext+1, colnum+1, a, rows, sortind)
+ array[name] = a
+ del a
+
+ array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
+
+ wvar, = np.where(isvar == True) # noqa
+ if wvar.size > 0:
+ for i in wvar:
+ colnum = colnums[i]
+ name = array.dtype.names[i]
+ dlist = self._FITS.read_var_column_as_list(
+ self._ext+1, colnum+1, rows, sortind,
+ )
+ is_string = (isinstance(dlist[0], str) or
+ (IS_PY3 and isinstance(dlist[0], bytes)))
+
+ if array[name].dtype.descr[0][1][1] == 'O':
+ # storing in object array
+ # get references to each, no copy made
+ for irow, item in enumerate(dlist):
+ if sortind is not None:
+ irow = sortind[irow]
+ if IS_PY3 and isinstance(item, bytes):
+ item = item.decode('ascii')
+ array[name][irow] = item
+ else:
+ for irow, item in enumerate(dlist):
+ if sortind is not None:
+ irow = sortind[irow]
+ if IS_PY3 and isinstance(item, bytes):
+ item = item.decode('ascii')
+ if is_string:
+ array[name][irow] = item
+ else:
+ ncopy = len(item)
+ array[name][irow][0:ncopy] = item[:]
+
+ if self.lower or lower:
+ _names_to_lower_if_recarray(array)
+ elif self.upper or upper:
+ _names_to_upper_if_recarray(array)
+
+ self._maybe_trim_strings(array, trim_strings=trim_strings)
+
+ return array
+
+ read_ascii = read
+
+
+class TableColumnSubset(object):
+ """
+
+ A class representing a subset of the columns on disk. When called
+ with .read() or [ rows ] the data are read from disk.
+
+ Useful because subsets can be passed around to functions, or chained
+ with a row selection.
+
+ This class is returned when using [ ] notation to specify fields in a
+ TableHDU class
+
+ fits = fitsio.FITS(fname)
+ colsub = fits[ext][field_list]
+
+ returns a TableColumnSubset object. To read rows:
+
+ data = fits[ext][field_list][row_list]
+
+ colsub = fits[ext][field_list]
+ data = colsub[row_list]
+ data = colsub.read(rows=row_list)
+
+ to read all, use .read() with no args or [:]
+ """
+
+ def __init__(self, fitshdu, columns):
+ """
+ Input is the FITS instance and a list of column names.
+ """
+
+ self.columns = columns
+ if isstring(columns) or isinteger(columns):
+ # this is to check if it exists
+ self.colnums = [fitshdu._extract_colnum(columns)]
+
+ self.is_scalar = True
+ self.columns_list = [columns]
+ else:
+ # this is to check if it exists
+ self.colnums = fitshdu._extract_colnums(columns)
+
+ self.is_scalar = False
+ self.columns_list = columns
+
+ self.fitshdu = fitshdu
+
+ def read(self, columns=None, rows=None, vstorage=None, lower=False,
+ upper=False, trim_strings=False, **keys):
+ """
+ Read the data from disk and return as a numpy array
+
+ parameters
+ ----------
+ columns: list/array, optional
+ An optional set of columns to read from table HDUs. Can be string
+ or number. If a sequence, a recarray is always returned. If a
+ scalar, an ordinary array is returned.
+ rows: optional
+ An optional list of rows to read from table HDUs. Default is to
+ read all.
+ vstorage: string, optional
+ Over-ride the default method to store variable length columns. Can
+ be 'fixed' or 'object'. See docs on fitsio.FITS for details.
+ lower: bool, optional
+ If True, force all column names to lower case in output. Will
+ over-ride the lower= keyword from construction.
+ upper: bool, optional
+ If True, force all column names to upper case in output. Will
+ over-ride the upper= keyword from construction.
+ trim_strings: bool, optional
+ If True, trim trailing spaces from strings. Will over-ride the
+ trim_strings= keyword from constructor.
+ """
+ if keys:
+ import warnings
+ warnings.warn(
+ "The keyword arguments '%s' are being ignored! This warning "
+ "will be an error in a future version of `fitsio`!" % keys,
+ DeprecationWarning, stacklevel=2)
+
+ if self.is_scalar:
+ data = self.fitshdu.read_column(
+ self.columns,
+ rows=rows, vstorage=vstorage, lower=lower, upper=upper,
+ trim_strings=trim_strings)
+ else:
+ if columns is None:
+ c = self.columns
+ else:
+ c = columns
+ data = self.fitshdu.read(
+ columns=c,
+ rows=rows, vstorage=vstorage, lower=lower, upper=upper,
+ trim_strings=trim_strings)
+
+ return data
+
+ def __getitem__(self, arg):
+ """
+ If columns are sent, then the columns will just get reset and
+ we'll return a new object
+
+ If rows are sent, they are read and the result returned.
+ """
+
+ # we have to unpack the rows if we are reading a subset
+ # of the columns because our slice operator only works
+ # on whole rows. We could allow rows= keyword to
+ # be a slice...
+
+ res, flags = \
+ self.fitshdu._process_args_as_rows_or_columns(arg, unpack=True)
+ if 'isrows' in flags:
+ # rows was entered: read all current column subset
+ array = self.read(rows=res)
+ if 'isscalar' in flags:
+ assert array.shape[0] == 1
+ array = array[0]
+ return array
+ else:
+ # columns was entered. Return a subset object
+ return TableColumnSubset(self.fitshdu, columns=res)
+
+ def __repr__(self):
+ """
+ Representation for TableColumnSubset
+ """
+ spacing = ' '*2
+ cspacing = ' '*4
+
+ hdu = self.fitshdu
+ info = self.fitshdu._info
+ colinfo = info['colinfo']
+
+ text = []
+ text.append("%sfile: %s" % (spacing, hdu._filename))
+ text.append("%sextension: %d" % (spacing, info['hdunum']-1))
+ text.append("%stype: %s" % (spacing, _hdu_type_map[info['hdutype']]))
+ text.append('%srows: %d' % (spacing, info['nrows']))
+ text.append("%scolumn subset:" % spacing)
+
+ cspacing = ' '*4
+ nspace = 4
+ nname = 15
+ ntype = 6
+ format = cspacing + "%-" + str(nname) + "s %" + str(ntype) + "s %s"
+ pformat = (
+ cspacing + "%-" + str(nname) + "s\n %" +
+ str(nspace+nname+ntype) + "s %s")
+
+ for colnum in self.colnums:
+ cinfo = colinfo[colnum]
+
+ if len(cinfo['name']) > nname:
+ f = pformat
+ else:
+ f = format
+
+ dt, isvar, istbit = hdu._get_tbl_numpy_dtype(
+ colnum, include_endianness=False)
+ if isvar:
+ tform = cinfo['tform']
+ if dt[0] == 'S':
+ dt = 'S0'
+ dimstr = 'vstring[%d]' % _extract_vararray_max(tform)
+ else:
+ dimstr = 'varray[%s]' % _extract_vararray_max(tform)
+ else:
+ dimstr = _get_col_dimstr(cinfo['tdim'])
+
+ s = f % (cinfo['name'], dt, dimstr)
+ text.append(s)
+
+ s = "\n".join(text)
+ return s
+
+
+def _tdim2shape(tdim, name, is_string=False):
+ shape = None
+ if tdim is None:
+ raise ValueError("field '%s' has malformed TDIM" % name)
+
+ if len(tdim) > 1 or tdim[0] > 1:
+ if is_string:
+ shape = list(reversed(tdim[1:]))
+ else:
+ shape = list(reversed(tdim))
+
+ if len(shape) == 1:
+ shape = shape[0]
+ else:
+ shape = tuple(shape)
+
+ return shape
+
+
+def _names_to_lower_if_recarray(data):
+ if data.dtype.names is not None:
+ data.dtype.names = [n.lower() for n in data.dtype.names]
+
+
+def _names_to_upper_if_recarray(data):
+ if data.dtype.names is not None:
+ data.dtype.names = [n.upper() for n in data.dtype.names]
+
+
+def _trim_strings(data):
+ names = data.dtype.names
+ if names is not None:
+ # run through each field separately
+ for n in names:
+ if data[n].dtype.descr[0][1][1] in ['S', 'U']:
+ data[n] = np.char.rstrip(data[n])
+ else:
+ if data.dtype.descr[0][1][1] in ['S', 'U']:
+ data[:] = np.char.rstrip(data[:])
+
+
+def _extract_vararray_max(tform):
+ """
+ Extract number from PX(number)
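+
+ For example, 'PJ(120)' gives 120; -1 is returned when no maximum
+ is present, as for plain 'PJ'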
+ """
+ first = tform.find('(')
+ last = tform.rfind(')')
+
+ if first == -1 or last == -1:
+ # no max length specified
+ return -1
+
+ maxnum = int(tform[first+1:last])
+ return maxnum
+
+
+def _get_col_dimstr(tdim, is_string=False):
+ """
+ not for variable length
+ """
+ dimstr = ''
+ if tdim is None:
+ dimstr = 'array[bad TDIM]'
+ else:
+ if is_string:
+ if len(tdim) > 1:
+ dimstr = [str(d) for d in tdim[1:]]
+ else:
+ if len(tdim) > 1 or tdim[0] > 1:
+ dimstr = [str(d) for d in tdim]
+ if dimstr != '':
+ dimstr = ','.join(dimstr)
+ dimstr = 'array[%s]' % dimstr
+
+ return dimstr
+
+
+# no support yet for complex
+# all strings are read as bytes for python3 and then decoded to unicode
+_table_fits2npy = {1: 'i1',
+ 11: 'u1',
+ 12: 'i1',
+ # logical. Note pyfits uses this for i1,
+ # cfitsio casts to char*
+ 14: 'b1',
+ 16: 'S',
+ 20: 'u2',
+ 21: 'i2',
+ 30: 'u4', # 30=TUINT
+ 31: 'i4', # 31=TINT
+ 40: 'u4', # 40=TULONG
+ 41: 'i4', # 41=TLONG
+ 42: 'f4',
+ 81: 'i8',
+ 82: 'f8',
+ 83: 'c8', # TCOMPLEX
+ 163: 'c16'} # TDBLCOMPLEX
+
+# cfitsio returns only types f8, i4 and strings for column types. in order to
+# avoid data loss, we always use i8 for integer types
+# all strings are read as bytes for python3 and then decoded to unicode
+_table_fits2npy_ascii = {16: 'S',
+ 31: 'i8', # listed as TINT, reading as i8
+ 41: 'i8', # listed as TLONG, reading as i8
+ 81: 'i8',
+ 21: 'i4', # listed as TSHORT, reading as i4
+ 42: 'f8', # listed as TFLOAT, reading as f8
+ 82: 'f8'}
+
+# for TFORM
+_table_npy2fits_form = {'b1': 'L',
+ 'u1': 'B',
+ 'i1': 'S', # gets converted to unsigned
+ 'S': 'A',
+ 'U': 'A',
+ 'u2': 'U', # gets converted to signed
+ 'i2': 'I',
+ 'u4': 'V', # gets converted to signed
+ 'i4': 'J',
+ 'i8': 'K',
+ 'f4': 'E',
+ 'f8': 'D',
+ 'c8': 'C',
+ 'c16': 'M'}
+
+# from mrdfits; note G gets turned into E
+# types= ['A', 'I', 'L', 'B', 'F', 'D', 'C', 'M', 'K']
+# formats=['A1', 'I6', 'I10', 'I4', 'G15.9','G23.17', 'G15.9', 'G23.17',
+# 'I20']
+
+_table_npy2fits_form_ascii = {'S': 'A1', # Need to add max here
+ 'U': 'A1', # Need to add max here
+ 'i2': 'I7', # I
+ 'i4': 'I12', # ??
+ # 'i8':'I21', # K # i8 aren't supported
+ # 'f4':'E15.7', # F
+ # F We must write as f8 since we can only
+ # read as f8
+ 'f4': 'E26.17',
+ # D 25.16 looks right, but this is recommended
+ 'f8': 'E26.17'}
+
+
+def _npy2fits(d, table_type='binary', write_bitcols=False):
+ """
+ d is the full element from the descr
+ """
+ npy_dtype = d[1][1:]
+ if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
+ name, form, dim = _npy_string2fits(d, table_type=table_type)
+ else:
+ name, form, dim = _npy_num2fits(
+ d, table_type=table_type, write_bitcols=write_bitcols)
+
+ return name, form, dim
+
+
+def _npy_num2fits(d, table_type='binary', write_bitcols=False):
+ """
+ d is the full element from the descr
+
+ For vector,array columns the form is the total counts
+ followed by the code.
+
+ For array columns with dimension greater than 1, the dim is set to
+ (dim1, dim2, ...)
+ So it is treated like an extra dimension
+
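+ For example (a sketch): the descr entry ('x', '<f8', (2, 3)) yields
+ form='6D' and dim='(3,2)'
+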
+ """
+
+ dim = None
+
+ name = d[0]
+
+ npy_dtype = d[1][1:]
+ if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
+ raise ValueError("got S or U type: use _npy_string2fits")
+
+ if npy_dtype not in _table_npy2fits_form:
+ raise ValueError("unsupported type '%s'" % npy_dtype)
+
+ if table_type == 'binary':
+ form = _table_npy2fits_form[npy_dtype]
+ else:
+ form = _table_npy2fits_form_ascii[npy_dtype]
+
+ # now the dimensions
+ if len(d) > 2:
+ if table_type == 'ascii':
+ raise ValueError(
+ "Ascii table columns must be scalar, got %s" % str(d))
+
+ if write_bitcols and npy_dtype == 'b1':
+ # multi-dimensional boolean
+ form = 'X'
+
+ # Note, depending on numpy version, even 1-d can be a tuple
+ if isinstance(d[2], tuple):
+ count = reduce(lambda x, y: x*y, d[2])
+ form = '%d%s' % (count, form)
+
+ if len(d[2]) > 1:
+ # this is multi-dimensional array column. the form
+ # should be total elements followed by A
+ dim = list(reversed(d[2]))
+ dim = [str(e) for e in dim]
+ dim = '(' + ','.join(dim)+')'
+ else:
+ # this is a vector (1d array) column
+ count = d[2]
+ form = '%d%s' % (count, form)
+
+ return name, form, dim
+
+
+def _npy_string2fits(d, table_type='binary'):
+ """
+ d is the full element from the descr
+
+ form for strings is the total number of bytes followed by A. Thus
+ for vector or array columns it is the size of the string times the
+ total number of elements in the array.
+
+ Then the dim is set to
+ (sizeofeachstring, dim1, dim2, ...)
+ So it is treated like an extra dimension
+
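+ For example (a sketch): the descr entry ('s', '|S5', (2, 3)) yields
+ form='30A' and dim='(5,3,2)'
+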
+ """
+
+ dim = None
+
+ name = d[0]
+
+ npy_dtype = d[1][1:]
+ if npy_dtype[0] != 'S' and npy_dtype[0] != 'U':
+ raise ValueError("expected S or U type, got %s" % npy_dtype[0])
+
+ # get the size of each string
+ string_size_str = npy_dtype[1:]
+ string_size = int(string_size_str)
+
+ if string_size <= 0:
+ raise ValueError('string sizes must be > 0, '
+ 'got %s for field %s' % (npy_dtype, name))
+
+ # now the dimensions
+ if len(d) == 2:
+ if table_type == 'ascii':
+ form = 'A'+string_size_str
+ else:
+ form = string_size_str+'A'
+ else:
+ if table_type == 'ascii':
+ raise ValueError(
+ "Ascii table columns must be scalar, got %s" % str(d))
+        if isinstance(d[2], tuple):
+            # this is an array column. the form
+            # should be total elements followed by A
+            count = reduce(lambda x, y: x*y, d[2])
+            count = string_size*count
+            form = '%dA' % count
+
+            # will have to do tests to see if this is the right order
+            dim = list(reversed(d[2]))
+            dim = [string_size_str] + [str(e) for e in dim]
+            dim = '(' + ','.join(dim)+')'
+ else:
+ # this is a vector (1d array) column
+ count = string_size*d[2]
+ form = '%dA' % count
+
+ # will have to do tests to see if this is the right order
+ dim = [string_size_str, str(d[2])]
+ dim = '(' + ','.join(dim)+')'
+
+ return name, form, dim
--- /dev/null
+"""
+header classes for fitslib, part of the fitsio package.
+
+See the main docs at https://github.com/esheldon/fitsio
+
+ Copyright (C) 2011 Erin Sheldon, BNL. erin dot sheldon at gmail dot com
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+from __future__ import with_statement, print_function
+import warnings
+
+from . import _fitsio_wrap
+from .util import isstring, FITSRuntimeWarning, IS_PY3
+
+# for python3 compat
+if IS_PY3:
+ xrange = range
+
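+# header record classes; these match the key classes defined by cfitsio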
+TYP_STRUC_KEY = 10
+TYP_CMPRS_KEY = 20
+TYP_SCAL_KEY = 30
+TYP_NULL_KEY = 40
+TYP_DIM_KEY = 50
+TYP_RANG_KEY = 60
+TYP_UNIT_KEY = 70
+TYP_DISP_KEY = 80
+TYP_HDUID_KEY = 90
+TYP_CKSUM_KEY = 100
+TYP_WCS_KEY = 110
+TYP_REFSYS_KEY = 120
+TYP_COMM_KEY = 130
+TYP_CONT_KEY = 140
+TYP_USER_KEY = 150
+
+
+class FITSHDR(object):
+ """
+ A class representing a FITS header.
+
+ parameters
+ ----------
+ record_list: optional
+ A list of dicts, or dict, or another FITSHDR
+ - list of dictionaries containing 'name','value' and optionally
+ a 'comment' field; the order is preserved.
+ - a dictionary of keyword-value pairs; no comments are written
+ in this case, and the order is arbitrary.
+ - another FITSHDR object; the order is preserved.
+
+ examples:
+
+ hdr=FITSHDR()
+
+ # set a simple value
+ hdr['blah'] = 35
+
+ # set from a dict to include a comment.
+ rec={'name':'fromdict', 'value':3, 'comment':'my comment'}
+ hdr.add_record(rec)
+
+ # can do the same with a full FITSRecord
+ rec=FITSRecord( {'name':'temp', 'value':35, 'comment':'temp in C'} )
+ hdr.add_record(rec)
+
+ # in the above, the record is replaced if one with the same name
+ # exists, except for COMMENT and HISTORY, which can exist as
+ # duplicates
+
+ # print the header
+ print(hdr)
+
+ # print a single record
+ print(hdr['fromdict'])
+
+
+ # can also set from a card
+ hdr.add_record('test = 77')
+ # using a FITSRecord object (internally uses FITSCard)
+ card=FITSRecord('test = 77')
+ hdr.add_record(card)
+
+ # can also construct with a record list
+ recs=[{'name':'test', 'value':35, 'comment':'a comment'},
+ {'name':'blah', 'value':'some string'}]
+ hdr=FITSHDR(recs)
+
+ # if you have no comments, you can construct with a simple dict
+ recs={'day':'saturday',
+ 'telescope':'blanco'}
+ hdr=FITSHDR(recs)
+
+ """
+ def __init__(self, record_list=None):
+
+ self._record_list = []
+ self._record_map = {}
+ self._index_map = {}
+
+ if isinstance(record_list, FITSHDR):
+ for r in record_list.records():
+ self.add_record(r)
+ elif isinstance(record_list, dict):
+ for k in record_list:
+ r = {'name': k, 'value': record_list[k]}
+ self.add_record(r)
+ elif isinstance(record_list, list):
+ for r in record_list:
+ self.add_record(r)
+ elif record_list is not None:
+ raise ValueError("expected a dict or list of dicts or FITSHDR")
+
+ def add_record(self, record_in):
+ """
+ Add a new record. Strip quotes from around strings.
+
+ This will over-write if the key already exists, except
+ for COMMENT and HISTORY fields
+
+        parameters
+        ----------
+        record_in:
+            The record, either a dict or a header card string
+            or a FITSRecord or FITSCard
+ """
+ if (isinstance(record_in, dict) and
+ 'name' in record_in and 'value' in record_in):
+ record = {}
+ record.update(record_in)
+ else:
+ record = FITSRecord(record_in)
+
+        # append if the name does not yet exist, or if it is a
+        # COMMENT/HISTORY/CONTINUE field (these may repeat); otherwise
+        # over-write the existing record
+ key = record['name']
+ if key is not None:
+ key = key.upper()
+
+ key_exists = key in self._record_map
+
+ if not key_exists or key in ('COMMENT', 'HISTORY', 'CONTINUE', None):
+ # append new record
+ self._record_list.append(record)
+ index = len(self._record_list)-1
+ self._index_map[key] = index
+ else:
+ # over-write existing
+ index = self._index_map[key]
+ self._record_list[index] = record
+
+ self._record_map[key] = record
+
+ def _add_to_map(self, record):
+ key = record['name'].upper()
+ self._record_map[key] = record
+
+ def get_comment(self, item):
+ """
+ Get the comment for the requested entry
+ """
+ key = item.upper()
+ if key not in self._record_map:
+ raise KeyError("unknown record: %s" % key)
+
+ if 'comment' not in self._record_map[key]:
+ return None
+ else:
+ return self._record_map[key]['comment']
+
+ def records(self):
+ """
+ Return the list of full records as a list of dictionaries.
+ """
+ return self._record_list
+
+ def keys(self):
+ """
+ Return a copy of the current key list.
+ """
+ return [e['name'] for e in self._record_list]
+
+ def delete(self, name):
+ """
+ Delete the specified entry if it exists.
+ """
+ if isinstance(name, (list, tuple)):
+ for xx in name:
+ self.delete(xx)
+ else:
+ if name in self._record_map:
+ del self._record_map[name]
+                # remember the index of the deleted record
+                cur_index = self._index_map[name]
+                del self._index_map[name]
+                self._record_list = [
+                    r for r in self._record_list if r['name'] != name]
+
+                # shift down the indices of all records that followed it
+ for k, v in self._index_map.items():
+ if v > cur_index:
+ self._index_map[k] = v - 1
+
+ def clean(self, is_table=False):
+ """
+ Remove reserved keywords from the header.
+
+ These are keywords that the fits writer must write in order
+ to maintain consistency between header and data.
+
+ keywords
+ --------
+ is_table: bool, optional
+ Set True if this is a table, so extra keywords will be cleaned
+ """
+
+ rmnames = [
+ 'SIMPLE', 'EXTEND', 'XTENSION', 'BITPIX', 'PCOUNT', 'GCOUNT',
+ 'THEAP',
+ 'EXTNAME',
+ # 'BLANK',
+ 'ZQUANTIZ', 'ZDITHER0', 'ZIMAGE', 'ZCMPTYPE',
+ 'ZSIMPLE', 'ZTENSION', 'ZPCOUNT', 'ZGCOUNT',
+ 'ZBITPIX', 'ZEXTEND',
+ # 'FZTILELN','FZALGOR',
+ 'CHECKSUM', 'DATASUM']
+
+ if is_table:
+ # these are not allowed in tables
+ rmnames += [
+ 'BUNIT', 'BSCALE', 'BZERO',
+ ]
+
+ self.delete(rmnames)
+
+ r = self._record_map.get('NAXIS', None)
+ if r is not None:
+ naxis = int(r['value'])
+ self.delete('NAXIS')
+
+ rmnames = ['NAXIS%d' % i for i in xrange(1, naxis+1)]
+ self.delete(rmnames)
+
+ r = self._record_map.get('ZNAXIS', None)
+ self.delete('ZNAXIS')
+ if r is not None:
+
+ znaxis = int(r['value'])
+
+ rmnames = ['ZNAXIS%d' % i for i in xrange(1, znaxis+1)]
+ self.delete(rmnames)
+ rmnames = ['ZTILE%d' % i for i in xrange(1, znaxis+1)]
+ self.delete(rmnames)
+ rmnames = ['ZNAME%d' % i for i in xrange(1, znaxis+1)]
+ self.delete(rmnames)
+ rmnames = ['ZVAL%d' % i for i in xrange(1, znaxis+1)]
+ self.delete(rmnames)
+
+ r = self._record_map.get('TFIELDS', None)
+ if r is not None:
+ tfields = int(r['value'])
+ self.delete('TFIELDS')
+
+ if tfields > 0:
+
+ nbase = [
+ 'TFORM', 'TTYPE', 'TDIM', 'TUNIT', 'TSCAL', 'TZERO',
+ 'TNULL', 'TDISP', 'TDMIN', 'TDMAX', 'TDESC', 'TROTA',
+ 'TRPIX', 'TRVAL', 'TDELT', 'TCUNI',
+ # 'FZALG'
+ ]
+ for i in xrange(1, tfields+1):
+ names = ['%s%d' % (n, i) for n in nbase]
+ self.delete(names)
+
+ def get(self, item, default_value=None):
+ """
+ Get the requested header entry by keyword name
+ """
+
+ found, name = self._contains_and_name(item)
+ if found:
+ return self._record_map[name]['value']
+ else:
+ return default_value
+
+ def __len__(self):
+ return len(self._record_list)
+
+ def __contains__(self, item):
+ found, _ = self._contains_and_name(item)
+ return found
+
+ def _contains_and_name(self, item):
+
+ if isinstance(item, FITSRecord):
+ name = item['name']
+ elif isinstance(item, dict):
+ name = item.get('name', None)
+ if name is None:
+ raise ValueError("dict record must have 'name' field")
+ else:
+ name = item
+
+ found = False
+ if name is None:
+ if None in self._record_map:
+ found = True
+ else:
+ name = name.upper()
+ if name in self._record_map:
+ found = True
+ elif name[0:8] == 'HIERARCH':
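+                # strip the 'HIERARCH ' prefix (9 characters) and look
+                # up the bare name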
+ if len(name) > 9:
+ name = name[9:]
+ if name in self._record_map:
+ found = True
+
+ return found, name
+
+ def __setitem__(self, item, value):
+ if isinstance(value, (dict, FITSRecord)):
+ if item.upper() != value['name'].upper():
+ raise ValueError("when setting using a FITSRecord, the "
+ "name field must match")
+ rec = value
+ else:
+ rec = {'name': item, 'value': value}
+
+ try:
+ # the entry may already exist; if so, preserve the comment
+ comment = self.get_comment(item)
+ rec['comment'] = comment
+ except KeyError:
+ pass
+
+ self.add_record(rec)
+
+ def __getitem__(self, item):
+ if item not in self:
+ raise KeyError("unknown record: %s" % item)
+
+ return self.get(item)
+
+ def __iter__(self):
+ self._current = 0
+ return self
+
+ def next(self):
+ """
+ for iteration over the header entries
+ """
+ if self._current < len(self._record_list):
+ rec = self._record_list[self._current]
+ key = rec['name']
+ self._current += 1
+ return key
+ else:
+ raise StopIteration
+ __next__ = next
+
+ def _record2card(self, record):
+ """
+ when we add new records they don't have a card,
+ this sort of fakes it up similar to what cfitsio
+ does, just for display purposes. e.g.
+
+ DBL = 23.299843
+ LNG = 3423432
+ KEYSNC = 'hello '
+ KEYSC = 'hello ' / a comment for string
+ KEYDC = 3.14159265358979 / a comment for pi
+ KEYLC = 323423432 / a comment for long
+
+ basically,
+ - 8 chars, left aligned, for the keyword name
+ - a space
+ - 20 chars for value, left aligned for strings, right aligned for
+ numbers
+ - if there is a comment, one space followed by / then another space
+ then the comment out to 80 chars
+
+ """
+ name = record['name']
+ value = record['value']
+ comment = record.get('comment', '')
+
+ v_isstring = isstring(value)
+
+ if name is None:
+ card = ' %s' % comment
+ elif name == 'COMMENT':
+ card = 'COMMENT %s' % comment
+ elif name == 'CONTINUE':
+ card = 'CONTINUE %s' % value
+ elif name == 'HISTORY':
+ card = 'HISTORY %s' % value
+ else:
+ if len(name) > 8:
+ card = 'HIERARCH %s= ' % name
+ else:
+ card = '%-8s= ' % name[0:8]
+
+ # these may be string representations of data, or actual strings
+ if v_isstring:
+ value = str(value)
+ if len(value) > 0:
+ if value[0] != "'":
+ # this is a string representing a string header field
+ # make it look like it will look in the header
+ value = "'" + value + "'"
+ vstr = '%-20s' % value
+ else:
+ vstr = "%20s" % value
+ else:
+ vstr = "''"
+ else:
+ if value is True:
+ value = 'T'
+ elif value is False:
+ value = 'F'
+
+ # upper for things like 1.0E20 rather than 1.0e20
+ vstr = ('%20s' % value).upper()
+
+ card += vstr
+
+ if 'comment' in record:
+ card += ' / %s' % record['comment']
+
+ if v_isstring and len(card) > 80:
+ card = card[0:79] + "'"
+ else:
+ card = card[0:80]
+
+ return card
+
+ def __repr__(self):
+ rep = ['']
+ for r in self._record_list:
+ card = self._record2card(r)
+
+ rep.append(card)
+ return '\n'.join(rep)
+
+
+class FITSRecord(dict):
+ """
+ Class to represent a FITS header record
+
+ parameters
+ ----------
+ record: string or dict
+ If a string, it should represent a FITS header card
+
+ If a dict it should have 'name' and 'value' fields.
+ Can have a 'comment' field.
+
+ examples
+ --------
+
+ # from a dict. Can include a comment
+ rec=FITSRecord( {'name':'temp', 'value':35, 'comment':'temperature in C'} )
+
+ # from a card
+ card=FITSRecord('test = 77 / My comment')
+
+ """
+ def __init__(self, record):
+ self.set_record(record)
+
+ def set_record(self, record, **keys):
+ """
+ check the record is valid and set keys in the dict
+
+ parameters
+ ----------
+ record: string
+ Dict representing a record or a string representing a FITS header
+ card
+ """
+
+        if keys:
+            # warnings is imported at module level
+            warnings.warn(
+                "The keyword arguments '%s' are being ignored! This warning "
+                "will be an error in a future version of `fitsio`!" % keys,
+                DeprecationWarning, stacklevel=2)
+
+ if isstring(record):
+ card = FITSCard(record)
+ self.update(card)
+
+ self.verify()
+
+ else:
+
+ if isinstance(record, FITSRecord):
+ self.update(record)
+ elif isinstance(record, dict):
+ if 'name' in record and 'value' in record:
+ self.update(record)
+
+ elif 'card_string' in record:
+ self.set_record(record['card_string'])
+
+ else:
+ raise ValueError('record must have name,value fields '
+ 'or a card_string field')
+ else:
+ raise ValueError("record must be a string card or "
+ "dictionary or FITSRecord")
+
+ def verify(self):
+ """
+ make sure name,value exist
+ """
+ if 'name' not in self:
+ raise ValueError("each record must have a 'name' field")
+ if 'value' not in self:
+ raise ValueError("each record must have a 'value' field")
+
+
+_BLANK = ' '
+
+
+class FITSCard(FITSRecord):
+ """
+ class to represent ordinary FITS cards.
+
+ CONTINUE not supported
+
+ examples
+ --------
+
+    # from a card
+    card=FITSCard('test = 77 / My comment')
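+
+    # comment and history cards are classified automatically
+    com=FITSCard('COMMENT this is a comment')
+    hist=FITSCard('HISTORY some history')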
+ """
+ def __init__(self, card_string):
+ self.set_card(card_string)
+
+ def set_card(self, card_string):
+ self['card_string'] = card_string
+
+ self._check_hierarch()
+
+ if self._is_hierarch:
+ self._set_as_key()
+ else:
+ self._check_equals()
+
+ self._check_type()
+ self._check_len()
+
+ front = card_string[0:7]
+ if (not self.has_equals() or
+ front in ['COMMENT', 'HISTORY', 'CONTINU', _BLANK]):
+
+ if front == 'HISTORY':
+ self._set_as_history()
+ elif front == 'CONTINU':
+ self._set_as_continue()
+ elif front == _BLANK:
+ self._set_as_blank()
+ else:
+ # note anything without an = and not history and not blank
+ # key comment is treated as COMMENT; this is built into
+ # cfitsio as well
+ self._set_as_comment()
+
+ if self.has_equals():
+ mess = (
+ "warning: It is not FITS-compliant for a %s header "
+ "card to include an = sign. There may be slight "
+ "inconsistencies if you write this back out to a "
+ "file.")
+ mess = mess % (card_string[:8])
+ warnings.warn(mess, FITSRuntimeWarning)
+ else:
+ self._set_as_key()
+
+ def has_equals(self):
+ """
+ True if = is in position 8
+ """
+ return self._has_equals
+
+ def _check_hierarch(self):
+ card_string = self['card_string']
+ if card_string[0:8].upper() == 'HIERARCH':
+ self._is_hierarch = True
+ else:
+ self._is_hierarch = False
+
+ def _check_equals(self):
+ """
+ check for = in position 8, set attribute _has_equals
+ """
+ card_string = self['card_string']
+ if len(card_string) < 9:
+ self._has_equals = False
+ elif card_string[8] == '=':
+ self._has_equals = True
+ else:
+ self._has_equals = False
+
+ def _set_as_key(self):
+ card_string = self['card_string']
+ res = _fitsio_wrap.parse_card(card_string)
+ if len(res) == 5:
+ keyclass, name, value, dtype, comment = res
+ else:
+ keyclass, name, dtype, comment = res
+ value = None
+
+ if keyclass == TYP_CONT_KEY:
+ raise ValueError("bad card '%s'. CONTINUE not "
+ "supported" % card_string)
+
+ self['class'] = keyclass
+ self['name'] = name
+ self['value_orig'] = value
+ self['value'] = self._convert_value(value)
+ self['dtype'] = dtype
+ self['comment'] = comment
+
+ def _set_as_blank(self):
+ self['class'] = TYP_USER_KEY
+ self['name'] = None
+ self['value'] = None
+ self['comment'] = self['card_string'][8:]
+
+ def _set_as_comment(self):
+ comment = self._extract_comm_or_hist_value()
+
+ self['class'] = TYP_COMM_KEY
+ self['name'] = 'COMMENT'
+ self['value'] = comment
+
+ def _set_as_history(self):
+ history = self._extract_comm_or_hist_value()
+
+ self['class'] = TYP_COMM_KEY
+ self['name'] = 'HISTORY'
+ self['value'] = history
+
+ def _set_as_continue(self):
+ value = self._extract_comm_or_hist_value()
+
+ self['class'] = TYP_CONT_KEY
+ self['name'] = 'CONTINUE'
+ self['value'] = value
+
+ def _convert_value(self, value_orig):
+ """
+ things like 6 and 1.25 are converted with ast.literal_value
+
+ Things like 'hello' are stripped of quotes
+ """
+ import ast
+ if value_orig is None:
+ return value_orig
+
+ if value_orig.startswith("'") and value_orig.endswith("'"):
+ value = value_orig[1:-1]
+ else:
+
+ try:
+ avalue = ast.parse(value_orig).body[0].value
+ if isinstance(avalue, ast.BinOp):
+ # this is probably a string that happens to look like
+ # a binary operation, e.g. '25-3'
+ value = value_orig
+ else:
+ value = ast.literal_eval(value_orig)
+ except Exception:
+ value = self._convert_string(value_orig)
+
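+            # python allows underscores in integer literals
+            # (e.g. 1_000_000) but FITS does not, so such values are
+            # kept as strings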
+ if isinstance(value, int) and '_' in value_orig:
+ value = value_orig
+
+ return value
+
+ def _convert_string(self, s):
+ if s == 'T':
+ return True
+ elif s == 'F':
+ return False
+ else:
+ return s
+
+ def _extract_comm_or_hist_value(self):
+ card_string = self['card_string']
+ if self._has_equals:
+ if len(card_string) >= 9:
+ value = card_string[9:]
+ else:
+ value = ''
+ else:
+ if len(card_string) >= 8:
+ value = card_string[8:]
+ else:
+ value = ''
+ return value
+
+ def _check_type(self):
+ card_string = self['card_string']
+ if not isstring(card_string):
+ raise TypeError(
+ "card must be a string, got type %s" % type(card_string))
+
+    def _check_len(self):
+        ln = len(self['card_string'])
+        if ln > 80:
+            mess = "len(card) is %d. cards must have length <= 80"
+            raise ValueError(mess % ln)
--- /dev/null
+import sys
+import numpy as np
+from .. import util
+
+
+def check_header(header, rh):
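+    """
+    assert that every key in the input header has the same value in
+    the header read back from the file
+    """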
+ for k in header:
+ v = header[k]
+ rv = rh[k]
+
+ if isinstance(rv, str):
+ v = v.strip()
+ rv = rv.strip()
+
+ assert v == rv, "testing equal key '%s'" % k
+
+
+def compare_headerlist_header(header_list, header):
+ """
+ The first is a list of dicts, second a FITSHDR
+ """
+ for entry in header_list:
+ name = entry['name'].upper()
+ value = entry['value']
+ hvalue = header[name]
+
+ if isinstance(hvalue, str):
+ hvalue = hvalue.strip()
+
+ assert value == hvalue, (
+ "testing header key '%s'" % name
+ )
+
+ if 'comment' in entry:
+ assert (
+ entry['comment'].strip() ==
+ header.get_comment(name).strip()
+ ), "testing comment for header key '%s'" % name
+
+
+def cast_shape(shape):
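+    """
+    normalize trivial shapes so written and read arrays compare
+    equal: (n, 1) becomes (n,) and (1,) becomes the scalar shape ()
+    """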
+ if len(shape) == 2 and shape[1] == 1:
+ return (shape[0], )
+ elif shape == (1, ):
+ return tuple()
+ else:
+ return shape
+
+
+def compare_array(arr1, arr2, name):
+ arr1_shape = cast_shape(arr1.shape)
+ arr2_shape = cast_shape(arr2.shape)
+
+ assert arr1_shape == arr2_shape, (
+ "testing arrays '%s' shapes are equal: "
+ "input %s, read: %s" % (name, arr1_shape, arr2_shape)
+ )
+
+ if sys.version_info >= (3, 0, 0) and arr1.dtype.char == 'S':
+ _arr1 = arr1.astype('U')
+ else:
+ _arr1 = arr1
+
+ res = np.where(_arr1 != arr2)
+ for i, w in enumerate(res):
+ assert w.size == 0, "testing array '%s' dim %d are equal" % (name, i)
+
+
+def compare_array_tol(arr1, arr2, tol, name):
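+    """
+    assert the arrays agree within the given relative tolerance
+    """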
+ assert arr1.shape == arr2.shape, (
+ "testing arrays '%s' shapes are equal: "
+ "input %s, read: %s" % (name, arr1.shape, arr2.shape)
+ )
+
+ adiff = np.abs((arr1 - arr2)/arr1)
+ maxdiff = adiff.max()
+ res = np.where(adiff > tol)
+ for i, w in enumerate(res):
+ assert w.size == 0, (
+ "testing array '%s' dim %d are "
+ "equal within tolerance %e, found "
+ "max diff %e" % (name, i, tol, maxdiff)
+ )
+
+
+def compare_array_abstol(arr1, arr2, tol, name):
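+    """
+    assert the arrays agree within the given absolute tolerance
+    """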
+ assert arr1.shape == arr2.shape, (
+ "testing arrays '%s' shapes are equal: "
+ "input %s, read: %s" % (name, arr1.shape, arr2.shape)
+ )
+
+ adiff = np.abs(arr1-arr2)
+ maxdiff = adiff.max()
+ res = np.where(adiff > tol)
+ for i, w in enumerate(res):
+ assert w.size == 0, (
+ "testing array '%s' dim %d are "
+ "equal within tolerance %e, found "
+ "max diff %e" % (name, i, tol, maxdiff)
+ )
+
+
+def compare_object_array(arr1, arr2, name, rows=None):
+ """
+ The first must be object
+ """
+ if rows is None:
+ rows = np.arange(arr1.size)
+
+ for i, row in enumerate(rows):
+ if ((sys.version_info >= (3, 0, 0) and isinstance(arr2[i], bytes))
+ or isinstance(arr2[i], str)):
+
+ if sys.version_info >= (3, 0, 0) and isinstance(arr1[row], bytes):
+ _arr1row = arr1[row].decode('ascii')
+ else:
+ _arr1row = arr1[row]
+
+ assert _arr1row == arr2[i], (
+ "%s str el %d equal" % (name, i)
+ )
+ else:
+ delement = arr2[i]
+ orig = arr1[row]
+ s = len(orig)
+ compare_array(
+ orig, delement[0:s], "%s num el %d equal" % (name, i)
+ )
+
+
+def compare_rec(rec1, rec2, name):
+ for f in rec1.dtype.names:
+ rec1_shape = cast_shape(rec1[f].shape)
+ rec2_shape = cast_shape(rec2[f].shape)
+
+ assert rec1_shape == rec2_shape, (
+ "testing '%s' field '%s' shapes are equal: "
+ "input %s, read: %s" % (
+ name, f, rec1_shape, rec2_shape)
+ )
+
+ if sys.version_info >= (3, 0, 0) and rec1[f].dtype.char == 'S':
+ # for python 3, we get back unicode always
+ _rec1f = rec1[f].astype('U')
+ else:
+ _rec1f = rec1[f]
+
+        assert np.all(_rec1f == rec2[f]), "testing column %s" % f
+
+
+def compare_rec_subrows(rec1, rec2, rows, name):
+ for f in rec1.dtype.names:
+ rec1_shape = cast_shape(rec1[f][rows].shape)
+ rec2_shape = cast_shape(rec2[f].shape)
+
+ assert rec1_shape == rec2_shape, (
+ "testing '%s' field '%s' shapes are equal: "
+ "input %s, read: %s" % (
+ name, f, rec1_shape, rec2_shape)
+ )
+
+ if sys.version_info >= (3, 0, 0) and rec1[f].dtype.char == 'S':
+ # for python 3, we get back unicode always
+ _rec1frows = rec1[f][rows].astype('U')
+ else:
+ _rec1frows = rec1[f][rows]
+
+ res = np.where(_rec1frows != rec2[f])
+ for w in res:
+ assert w.size == 0, "testing column %s" % f
+
+
+def compare_rec_with_var(rec1, rec2, name, rows=None):
+ """
+
+ First one *must* be the one with object arrays
+
+ Second can have fixed length
+
+ both should be same number of rows
+
+ """
+
+ if rows is None:
+ rows = np.arange(rec2.size)
+ assert rec1.size == rec2.size, (
+ "testing '%s' same number of rows" % name
+ )
+
+ # rec2 may have fewer fields
+ for f in rec2.dtype.names:
+
+ # f1 will have the objects
+ if util.is_object(rec1[f]):
+ compare_object_array(
+ rec1[f], rec2[f],
+ "testing '%s' field '%s'" % (name, f),
+ rows=rows
+ )
+ else:
+ compare_array(
+ rec1[f][rows], rec2[f],
+ "testing '%s' num field '%s' equal" % (name, f)
+ )
+
+
+def compare_names(read_names, true_names, lower=False, upper=False):
+ for nread, ntrue in zip(read_names, true_names):
+ if lower:
+ tname = ntrue.lower()
+ mess = "lower: '%s' vs '%s'" % (nread, tname)
+ else:
+ tname = ntrue.upper()
+ mess = "upper: '%s' vs '%s'" % (nread, tname)
+
+ assert nread == tname, mess
--- /dev/null
+import sys
+import numpy as np
+from functools import lru_cache
+
+from .._fitsio_wrap import cfitsio_use_standard_strings
+
+lorem_ipsum = (
+ 'Lorem ipsum dolor sit amet, consectetur adipiscing '
+ 'elit, sed do eiusmod tempor incididunt ut labore '
+ 'et dolore magna aliqua'
+)
+
+
+@lru_cache(maxsize=1)
+def make_data():
+
+ nvec = 2
+ ashape = (21, 21)
+ Sdtype = 'S6'
+ Udtype = 'U6'
+
+ # all currently available types, scalar, 1-d and 2-d array columns
+ dtype = [
+ ('u1scalar', 'u1'),
+ ('i1scalar', 'i1'),
+ ('b1scalar', '?'),
+ ('u2scalar', 'u2'),
+ ('i2scalar', 'i2'),
+ ('u4scalar', 'u4'),
+ ('i4scalar', '<i4'), # mix the byte orders a bit, test swapping
+ ('i8scalar', 'i8'),
+ ('f4scalar', 'f4'),
+ ('f8scalar', '>f8'),
+ ('c8scalar', 'c8'), # complex, two 32-bit
+        ('c16scalar', 'c16'),  # complex, two 64-bit
+
+ ('u1vec', 'u1', nvec),
+ ('i1vec', 'i1', nvec),
+ ('b1vec', '?', nvec),
+ ('u2vec', 'u2', nvec),
+ ('i2vec', 'i2', nvec),
+ ('u4vec', 'u4', nvec),
+ ('i4vec', 'i4', nvec),
+ ('i8vec', 'i8', nvec),
+ ('f4vec', 'f4', nvec),
+ ('f8vec', 'f8', nvec),
+ ('c8vec', 'c8', nvec),
+ ('c16vec', 'c16', nvec),
+
+ ('u1arr', 'u1', ashape),
+ ('i1arr', 'i1', ashape),
+ ('b1arr', '?', ashape),
+ ('u2arr', 'u2', ashape),
+ ('i2arr', 'i2', ashape),
+ ('u4arr', 'u4', ashape),
+ ('i4arr', 'i4', ashape),
+ ('i8arr', 'i8', ashape),
+ ('f4arr', 'f4', ashape),
+ ('f8arr', 'f8', ashape),
+ ('c8arr', 'c8', ashape),
+ ('c16arr', 'c16', ashape),
+
+ # special case of (1,)
+ ('f8arr_dim1', 'f8', (1, )),
+
+
+ ('Sscalar', Sdtype),
+ ('Svec', Sdtype, nvec),
+ ('Sarr', Sdtype, ashape),
+ ]
+
+ if cfitsio_use_standard_strings():
+ dtype += [
+ ('Sscalar_nopad', Sdtype),
+ ('Svec_nopad', Sdtype, nvec),
+ ('Sarr_nopad', Sdtype, ashape),
+ ]
+
+    if sys.version_info >= (3, 0, 0):
+ dtype += [
+ ('Uscalar', Udtype),
+ ('Uvec', Udtype, nvec),
+ ('Uarr', Udtype, ashape),
+ ]
+
+ if cfitsio_use_standard_strings():
+ dtype += [
+ ('Uscalar_nopad', Udtype),
+ ('Uvec_nopad', Udtype, nvec),
+ ('Uarr_nopad', Udtype, ashape),
+ ]
+
+ dtype2 = [
+ ('index', 'i4'),
+ ('x', 'f8'),
+ ('y', 'f8'),
+ ]
+
+ nrows = 4
+ data = np.zeros(nrows, dtype=dtype)
+
+ dtypes = [
+ 'u1', 'i1', 'u2', 'i2', 'u4', 'i4', 'i8', 'f4', 'f8', 'c8', 'c16',
+ ]
+ for t in dtypes:
+ if t in ['c8', 'c16']:
+ data[t+'scalar'] = [complex(i+1, (i+1)*2) for i in range(nrows)]
+ vname = t + 'vec'
+ for row in range(nrows):
+ for i in range(nvec):
+ index = (row + 1) * (i + 1)
+ data[vname][row, i] = complex(index, index*2)
+ aname = t+'arr'
+ for row in range(nrows):
+ for i in range(ashape[0]):
+ for j in range(ashape[1]):
+ index = (row + 1) * (i + 1) * (j + 1)
+ data[aname][row, i, j] = complex(index, index*2)
+
+ else:
+ data[t+'scalar'] = 1 + np.arange(nrows, dtype=t)
+ data[t+'vec'] = 1 + np.arange(
+ nrows*nvec, dtype=t,
+ ).reshape(nrows, nvec)
+ arr = 1 + np.arange(nrows*ashape[0]*ashape[1], dtype=t)
+ data[t+'arr'] = arr.reshape(nrows, ashape[0], ashape[1])
+
+ for t in ['b1']:
+ data[t+'scalar'] = (np.arange(nrows) % 2 == 0).astype('?')
+ data[t+'vec'] = (
+ np.arange(nrows*nvec) % 2 == 0
+ ).astype('?').reshape(nrows, nvec)
+
+ arr = (np.arange(nrows*ashape[0]*ashape[1]) % 2 == 0).astype('?')
+ data[t+'arr'] = arr.reshape(nrows, ashape[0], ashape[1])
+
+ # strings get padded when written to the fits file. And the way I do
+ # the read, I read all bytes (ala mrdfits) so the spaces are preserved.
+ #
+ # so we need to pad out the strings with blanks so we can compare
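+    # e.g. 'bye' is stored as 'bye   ' in an S6 column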
+
+ data['Sscalar'] = ['%-6s' % s for s in ['hello', 'world', 'good', 'bye']]
+ data['Svec'][:, 0] = '%-6s' % 'hello'
+ data['Svec'][:, 1] = '%-6s' % 'world'
+
+ s = 1 + np.arange(nrows*ashape[0]*ashape[1])
+ s = ['%-6s' % el for el in s]
+ data['Sarr'] = np.array(s).reshape(nrows, ashape[0], ashape[1])
+
+ if cfitsio_use_standard_strings():
+ data['Sscalar_nopad'] = ['hello', 'world', 'good', 'bye']
+ data['Svec_nopad'][:, 0] = 'hello'
+ data['Svec_nopad'][:, 1] = 'world'
+
+ s = 1 + np.arange(nrows*ashape[0]*ashape[1])
+ s = ['%s' % el for el in s]
+ data['Sarr_nopad'] = np.array(s).reshape(nrows, ashape[0], ashape[1])
+
+ if sys.version_info >= (3, 0, 0):
+ data['Uscalar'] = [
+ '%-6s' % s for s in ['hello', 'world', 'good', 'bye']
+ ]
+ data['Uvec'][:, 0] = '%-6s' % 'hello'
+ data['Uvec'][:, 1] = '%-6s' % 'world'
+
+ s = 1 + np.arange(nrows*ashape[0]*ashape[1])
+ s = ['%-6s' % el for el in s]
+ data['Uarr'] = np.array(s).reshape(nrows, ashape[0], ashape[1])
+
+ if cfitsio_use_standard_strings():
+ data['Uscalar_nopad'] = ['hello', 'world', 'good', 'bye']
+ data['Uvec_nopad'][:, 0] = 'hello'
+ data['Uvec_nopad'][:, 1] = 'world'
+
+ s = 1 + np.arange(nrows*ashape[0]*ashape[1])
+ s = ['%s' % el for el in s]
+ data['Uarr_nopad'] = np.array(s).reshape(
+ nrows, ashape[0], ashape[1],
+ )
+
+ # use a dict list so we can have comments
+    # for the long keyword we use the largest possible 64-bit value
+
+ keys = [
+ {'name': 'test1', 'value': 35},
+ {'name': 'empty', 'value': ''},
+ {'name': 'long_keyword_name', 'value': 'stuff'},
+ {'name': 'test2', 'value': 'stuff',
+ 'comment': 'this is a string keyword'},
+ {'name': 'dbl', 'value': 23.299843,
+ 'comment': "this is a double keyword"},
+ {'name': 'edbl', 'value': 1.384123233e+43,
+ 'comment': "double keyword with exponent"},
+ {'name': 'lng', 'value': 2**63-1, 'comment': 'this is a long keyword'},
+ {'name': 'lngstr', 'value': lorem_ipsum, 'comment': 'long string'}
+ ]
+
+ # a second extension using the convenience function
+ nrows2 = 10
+ data2 = np.zeros(nrows2, dtype=dtype2)
+ data2['index'] = np.arange(nrows2, dtype='i4')
+ data2['x'] = np.arange(nrows2, dtype='f8')
+ data2['y'] = np.arange(nrows2, dtype='f8')
+
+ #
+ # ascii table
+ #
+
+ nvec = 2
+ ashape = (2, 3)
+ Sdtype = 'S6'
+ Udtype = 'U6'
+
+    # we support writing i2, i4, i8, f4, f8, but when reading, cfitsio
+    # always reports their types as i4 and f8, so we can't really use
+    # i8 and we are forced to read all floats at f8 precision
+
+ adtype = [
+ ('i2scalar', 'i2'),
+ ('i4scalar', 'i4'),
+ # ('i8scalar', 'i8'),
+ ('f4scalar', 'f4'),
+ ('f8scalar', 'f8'),
+ ('Sscalar', Sdtype),
+ ]
+ if sys.version_info >= (3, 0, 0):
+ adtype += [('Uscalar', Udtype)]
+
+ nrows = 4
+ try:
+ tdt = np.dtype(adtype, align=True)
+ except TypeError: # older numpy may not understand `align` argument
+ tdt = np.dtype(adtype)
+ adata = np.zeros(nrows, dtype=tdt)
+
+ adata['i2scalar'][:] = -32222 + np.arange(nrows, dtype='i2')
+ adata['i4scalar'][:] = -1353423423 + np.arange(nrows, dtype='i4')
+ adata['f4scalar'][:] = (
+ -2.55555555555555555555555e35 + np.arange(nrows, dtype='f4')*1.e35
+ )
+ adata['f8scalar'][:] = (
+ -2.55555555555555555555555e110 + np.arange(nrows, dtype='f8')*1.e110
+ )
+ adata['Sscalar'] = ['hello', 'world', 'good', 'bye']
+
+ if sys.version_info >= (3, 0, 0):
+ adata['Uscalar'] = ['hello', 'world', 'good', 'bye']
+
+ ascii_data = adata
+
+ #
+ # for variable length columns
+ #
+
+ # all currently available types, scalar, 1-d and 2-d array columns
+ dtype = [
+ ('u1scalar', 'u1'),
+ ('u1obj', 'O'),
+ ('i1scalar', 'i1'),
+ ('i1obj', 'O'),
+ ('u2scalar', 'u2'),
+ ('u2obj', 'O'),
+ ('i2scalar', 'i2'),
+ ('i2obj', 'O'),
+ ('u4scalar', 'u4'),
+ ('u4obj', 'O'),
+ ('i4scalar', '<i4'), # mix the byte orders a bit, test swapping
+ ('i4obj', 'O'),
+ ('i8scalar', 'i8'),
+ ('i8obj', 'O'),
+ ('f4scalar', 'f4'),
+ ('f4obj', 'O'),
+ ('f8scalar', '>f8'),
+ ('f8obj', 'O'),
+
+ ('u1vec', 'u1', nvec),
+ ('i1vec', 'i1', nvec),
+ ('u2vec', 'u2', nvec),
+ ('i2vec', 'i2', nvec),
+ ('u4vec', 'u4', nvec),
+ ('i4vec', 'i4', nvec),
+ ('i8vec', 'i8', nvec),
+ ('f4vec', 'f4', nvec),
+ ('f8vec', 'f8', nvec),
+
+ ('u1arr', 'u1', ashape),
+ ('i1arr', 'i1', ashape),
+ ('u2arr', 'u2', ashape),
+ ('i2arr', 'i2', ashape),
+ ('u4arr', 'u4', ashape),
+ ('i4arr', 'i4', ashape),
+ ('i8arr', 'i8', ashape),
+ ('f4arr', 'f4', ashape),
+ ('f8arr', 'f8', ashape),
+
+ # special case of (1,)
+ ('f8arr_dim1', 'f8', (1, )),
+
+ ('Sscalar', Sdtype),
+ ('Sobj', 'O'),
+ ('Svec', Sdtype, nvec),
+ ('Sarr', Sdtype, ashape),
+ ]
+
+    if sys.version_info >= (3, 0, 0):
+ dtype += [
+ ('Uscalar', Udtype),
+ ('Uvec', Udtype, nvec),
+ ('Uarr', Udtype, ashape)]
+
+ nrows = 4
+ vardata = np.zeros(nrows, dtype=dtype)
+
+ for t in ['u1', 'i1', 'u2', 'i2', 'u4', 'i4', 'i8', 'f4', 'f8']:
+ vardata[t+'scalar'] = 1 + np.arange(nrows, dtype=t)
+ vardata[t+'vec'] = 1 + np.arange(nrows*nvec, dtype=t).reshape(
+ nrows, nvec,
+ )
+ arr = 1 + np.arange(nrows*ashape[0]*ashape[1], dtype=t)
+ vardata[t+'arr'] = arr.reshape(nrows, ashape[0], ashape[1])
+
+ for i in range(nrows):
+ vardata[t+'obj'][i] = vardata[t+'vec'][i]
+
+ # strings get padded when written to the fits file. And the way I do
+    # the read, I read all bytes (ala mrdfits) so the spaces are preserved.
+ #
+ # so for comparisons, we need to pad out the strings with blanks so we
+ # can compare
+
+ vardata['Sscalar'] = [
+ '%-6s' % s for s in ['hello', 'world', 'good', 'bye']
+ ]
+ vardata['Svec'][:, 0] = '%-6s' % 'hello'
+ vardata['Svec'][:, 1] = '%-6s' % 'world'
+
+ s = 1 + np.arange(nrows * ashape[0] * ashape[1])
+ s = ['%-6s' % el for el in s]
+ vardata['Sarr'] = np.array(s).reshape(nrows, ashape[0], ashape[1])
+
+ if sys.version_info >= (3, 0, 0):
+ vardata['Uscalar'] = [
+ '%-6s' % s for s in ['hello', 'world', 'good', 'bye']
+ ]
+ vardata['Uvec'][:, 0] = '%-6s' % 'hello'
+ vardata['Uvec'][:, 1] = '%-6s' % 'world'
+
+ s = 1 + np.arange(nrows*ashape[0]*ashape[1])
+ s = ['%-6s' % el for el in s]
+ vardata['Uarr'] = np.array(s).reshape(nrows, ashape[0], ashape[1])
+
+ for i in range(nrows):
+ vardata['Sobj'][i] = vardata['Sscalar'][i].rstrip()
+
+ #
+ # for bitcol columns
+ #
+ nvec = 2
+ ashape = (21, 21)
+
+ dtype = [
+ ('b1vec', '?', nvec),
+ ('b1arr', '?', ashape)
+ ]
+
+ nrows = 4
+ bdata = np.zeros(nrows, dtype=dtype)
+
+ for t in ['b1']:
+ bdata[t+'vec'] = (np.arange(nrows*nvec) % 2 == 0).astype('?').reshape(
+ nrows, nvec,
+ )
+ arr = (np.arange(nrows*ashape[0]*ashape[1]) % 2 == 0).astype('?')
+ bdata[t+'arr'] = arr.reshape(nrows, ashape[0], ashape[1])
+
+ return {
+ 'data': data,
+ 'keys': keys,
+ 'data2': data2,
+ 'ascii_data': ascii_data,
+ 'vardata': vardata,
+ 'bdata': bdata,
+ }
--- /dev/null
+import tempfile
+import os
+import numpy as np
+from ..fitslib import write, FITS
+
+
+def test_empty_image_slice():
+
+ shape = (10, 10)
+ data = np.arange(shape[0] * shape[1]).reshape(shape)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+ write(fname, data, clobber=True)
+
+ with FITS(fname) as fits:
+ assert fits[0][0:0, 0:0].size == 0
+
+ assert fits[0][0:8, 0:0].size == 0
+
+ assert fits[0][0:0, 0:8].size == 0
--- /dev/null
+import os
+import tempfile
+import warnings
+import numpy as np
+from .makedata import make_data, lorem_ipsum
+from .checks import check_header, compare_headerlist_header
+from ..fitslib import FITS, read_header, write
+from ..header import FITSHDR
+from ..hdu.base import INVALID_HDR_CHARS
+
+
+def test_add_delete_and_update_records():
+ # Build a FITSHDR from a few records (no need to write on disk)
+ # Record names have to be in upper case to match with FITSHDR.add_record
+ recs = [
+ {'name': "First_record".upper(), 'value': 1,
+ 'comment': "number 1"},
+ {'name': "Second_record".upper(), 'value': "2"},
+ {'name': "Third_record".upper(), 'value': "3"},
+ {'name': "Last_record".upper(), 'value': 4,
+ 'comment': "number 4"}
+ ]
+ hdr = FITSHDR(recs)
+
+ # Add a new record
+ hdr.add_record({'name': 'New_record'.upper(), 'value': 5})
+
+ # Delete number 2 and 4
+ hdr.delete('Second_record'.upper())
+ hdr.delete('Last_record'.upper())
+
+    # Update records: first and new one
+ hdr['First_record'] = 11
+ hdr['New_record'] = 3
+
+    # Do some checks: len and get value/comment
+ assert len(hdr) == 3
+ assert hdr['First_record'] == 11
+ assert hdr['New_record'] == 3
+ assert hdr['Third_record'] == '3'
+ assert hdr.get_comment('First_record') == 'number 1'
+ assert not hdr.get_comment('New_record')
+
+
+def test_header_comment_preserved():
+ """
+ Test that the comment is preserved after resetting the value
+ """
+
+ l1 = 'KEY1 = 77 / My comment1'
+ l2 = 'KEY2 = 88 / My comment2'
+ hdr = FITSHDR()
+ hdr.add_record(l1)
+ hdr.add_record(l2)
+
+ hdr['key1'] = 99
+ assert hdr.get_comment('key1') == 'My comment1', (
+ 'comment not preserved'
+ )
+
+
+def test_header_write_read():
+ """
+ Test a basic header write and read
+
+ Note the other read/write tests also are checking header writing with
+ a list of dicts
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ data = np.zeros(10)
+ header = {
+ 'x': 35,
+ 'y': 88.215,
+ 'eval': 1.384123233e+43,
+ 'empty': '',
+ 'funky': '35-8', # test old bug when strings look
+ # like expressions
+ 'name': 'J. Smith',
+ 'what': '89113e6', # test bug where converted to float
+ 'und': None,
+ 'binop': '25-3', # test string with binary operation in it
+ 'unders': '1_000_000', # test string with underscore
+ 'longs': lorem_ipsum,
+ # force hierarch + continue
+ "long_keyword_name": lorem_ipsum,
+ }
+ fits.write_image(data, header=header)
+
+ rh = fits[0].read_header()
+ check_header(header, rh)
+
+ with FITS(fname) as fits:
+ rh = fits[0].read_header()
+ check_header(header, rh)
+
+
+def test_header_update():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ data = np.zeros(10)
+ header1 = {
+ 'SCARD': 'one',
+ 'ICARD': 1,
+ 'FCARD': 1.0,
+ 'LCARD': True
+ }
+ header2 = {
+ 'SCARD': 'two',
+ 'ICARD': 2,
+ 'FCARD': 2.0,
+ 'LCARD': False,
+
+ 'SNEW': 'two',
+ 'INEW': 2,
+ 'FNEW': 2.0,
+ 'LNEW': False
+ }
+ fits.write_image(data, header=header1)
+ rh = fits[0].read_header()
+ check_header(header1, rh)
+
+ # Update header
+ fits[0].write_keys(header2)
+
+ with FITS(fname) as fits:
+ rh = fits[0].read_header()
+ check_header(header2, rh)
+
+
+def test_read_header_case():
+ """
+ Test read_header with and without case sensitivity
+
+ The reason we need a special test for this is because
+ the read_header code is optimized for speed and has
+ a different code path
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ data = np.zeros(10)
+ adata = make_data()
+ fits.write_image(data, header=adata['keys'], extname='First')
+ fits.write_image(data, header=adata['keys'], extname='second')
+
+ cases = [
+ ('First', True),
+ ('FIRST', False),
+ ('second', True),
+ ('seConD', False),
+ ]
+ for ext, ci in cases:
+ h = read_header(fname, ext=ext, case_sensitive=ci)
+ compare_headerlist_header(adata['keys'], h)
+
+
+def test_blank_key_comments():
+ """
+ test a few different comments
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ records = [
+ # empty should return empty
+ {'name': None, 'value': '', 'comment': ''},
+ # this will also return empty
+ {'name': None, 'value': '', 'comment': ' '},
+ # this will return exactly
+ {'name': None, 'value': '', 'comment': ' h'},
+ # this will return exactly
+ {'name': None, 'value': '', 'comment': '--- test comment ---'},
+ ]
+ header = FITSHDR(records)
+
+ fits.write(None, header=header)
+
+ rh = fits[0].read_header()
+
+ rrecords = rh.records()
+
+ for i, ri in ((0, 6), (1, 7), (2, 8)):
+ rec = records[i]
+ rrec = rrecords[ri]
+
+ assert rec['name'] is None, (
+ 'checking name is None'
+ )
+
+ comment = rec['comment']
+ rcomment = rrec['comment']
+ if '' == comment.strip():
+ comment = ''
+
+ assert comment == rcomment, (
+ "check empty key comment"
+ )
+
+
+def test_blank_key_comments_from_cards():
+ """
+ test a few different comments
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ records = [
+ ' ', # noqa
+ ' --- testing comment --- ', # noqa
+ ' --- testing comment --- ', # noqa
+ "COMMENT testing ", # noqa
+ ]
+ header = FITSHDR(records)
+
+ fits.write(None, header=header)
+
+ rh = fits[0].read_header()
+
+ rrecords = rh.records()
+
+ assert rrecords[6]['name'] is None, (
+ 'checking name is None'
+ )
+ assert rrecords[6]['comment'] == '', (
+ 'check empty key comment'
+ )
+ assert rrecords[7]['name'] is None, (
+ 'checking name is None'
+ )
+ assert rrecords[7]['comment'] == ' --- testing comment ---', (
+ "check empty key comment"
+ )
+ assert rrecords[8]['name'] is None, (
+ 'checking name is None'
+ )
+ assert rrecords[8]['comment'] == '--- testing comment ---', (
+ "check empty key comment"
+ )
+ assert rrecords[9]['name'] == 'COMMENT', (
+ 'checking name is COMMENT'
+ )
+ assert rrecords[9]['comment'] == 'testing', (
+ "check comment"
+ )
+
+
+def test_header_from_cards():
+ """
+ test generating a header from cards, writing it out and getting
+ back what we put in
+ """
+ hdr_from_cards = FITSHDR([
+ "IVAL = 35 / integer value ", # noqa
+ "SHORTS = 'hello world' ", # noqa
+ "UND = ", # noqa
+ "LONGS = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiu&'", # noqa
+ "CONTINUE 'smod tempor incididunt ut labore et dolore magna aliqua' ", # noqa
+ "DBL = 1.25 ", # noqa
+ ])
+ header = [
+ {'name': 'ival', 'value': 35, 'comment': 'integer value'},
+ {'name': 'shorts', 'value': 'hello world'},
+ {'name': 'und', 'value': None},
+ {'name': 'longs', 'value': lorem_ipsum},
+ {'name': 'dbl', 'value': 1.25},
+ ]
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ data = np.zeros(10)
+ fits.write_image(data, header=hdr_from_cards)
+
+ rh = fits[0].read_header()
+ compare_headerlist_header(header, rh)
+
+ with FITS(fname) as fits:
+ rh = fits[0].read_header()
+ compare_headerlist_header(header, rh)
+
+
+def test_bad_header_write_raises():
+ """
+ Test that an invalid header raises.
+ """
+
+    for c in INVALID_HDR_CHARS:
+        with tempfile.TemporaryDirectory() as tmpdir:
+            fname = os.path.join(tmpdir, 'test.fits')
+            hdr = {'bla%sg' % c: 3}
+            data = np.zeros(10)
+
+            try:
+                write(fname, data, header=hdr, clobber=True)
+            except Exception as e:
+                assert "header key 'BLA%sG' has" % c in str(e)
+            else:
+                raise AssertionError(
+                    "invalid header key 'bla%sg' did not raise" % c)
+
+
+def test_header_template():
+ """
+ test adding bunch of cards from a split template
+ """
+
+ header_template = """SIMPLE = T /
+BITPIX = 8 / bits per data value
+NAXIS = 0 / number of axes
+EXTEND = T / Extensions are permitted
+ORIGIN = 'LSST DM Header Service'/ FITS file originator
+
+ ---- Date, night and basic image information ----
+DATE = / Creation Date and Time of File
+DATE-OBS= / Date of the observation (image acquisition)
+DATE-BEG= / Time at the start of integration
+DATE-END= / end date of the observation
+MJD = / Modified Julian Date that the file was written
+MJD-OBS = / Modified Julian Date of observation
+MJD-BEG = / Modified Julian Date derived from DATE-BEG
+MJD-END = / Modified Julian Date derived from DATE-END
+OBSID = / ImageName from Camera StartIntergration
+GROUPID = / imageSequenceName from StartIntergration
+OBSTYPE = / BIAS, DARK, FLAT, OBJECT
+BUNIT = 'adu ' / Brightness units for pixel array
+
+ ---- Telescope info, location, observer ----
+TELESCOP= 'LSST AuxTelescope' / Telescope name
+INSTRUME= 'LATISS' / Instrument used to obtain these data
+OBSERVER= 'LSST' / Observer name(s)
+OBS-LONG= -70.749417 / [deg] Observatory east longitude
+OBS-LAT = -30.244639 / [deg] Observatory latitude
+OBS-ELEV= 2663.0 / [m] Observatory elevation
+OBSGEO-X= 1818938.94 / [m] X-axis Geocentric coordinate
+OBSGEO-Y= -5208470.95 / [m] Y-axis Geocentric coordinate
+OBSGEO-Z= -3195172.08 / [m] Z-axis Geocentric coordinate
+
+ ---- Pointing info, etc. ----
+
+DECTEL = / Telescope DEC of observation
+ROTPATEL= / Telescope Rotation
+ROTCOORD= 'sky' / Telescope Rotation Coordinates
+RA = / RA of Target
+DEC = / DEC of Target
+ROTPA = / Rotation angle relative to the sky (deg)
+HASTART = / [HH:MM:SS] Telescope hour angle at start
+ELSTART = / [deg] Telescope zenith distance at start
+AZSTART = / [deg] Telescope azimuth angle at start
+AMSTART = / Airmass at start
+HAEND = / [HH:MM:SS] Telescope hour angle at end
+ELEND = / [deg] Telescope zenith distance at end
+AZEND = / [deg] Telescope azimuth angle at end
+AMEND = / Airmass at end
+
+ ---- Image-identifying used to build OBS-ID ----
+TELCODE = 'AT' / The code for the telecope
+CONTRLLR= / The controller (e.g. O for OCS, C for CCS)
+DAYOBS = / The observation day as defined by image name
+SEQNUM = / The sequence number from the image name
+GROUPID = /
+
+ ---- Information from Camera
+CCD_MANU= 'ITL' / CCD Manufacturer
+CCD_TYPE= '3800C' / CCD Model Number
+CCD_SERN= '20304' / Manufacturers? CCD Serial Number
+LSST_NUM= 'ITL-3800C-098' / LSST Assigned CCD Number
+SEQCKSUM= / Checksum of Sequencer
+SEQNAME = / SequenceName from Camera StartIntergration
+REBNAME = / Name of the REB
+CONTNUM = / CCD Controller (WREB) Serial Number
+IMAGETAG= / DAQ Image id
+TEMP_SET= / Temperature set point (deg C)
+CCDTEMP = / Measured temperature (deg C)
+
+ ---- Geometry from Camera ----
+DETSIZE = / Size of sensor
+OVERH = / Over-scan pixels
+OVERV = / Vert-overscan pix
+PREH = / Pre-scan pixels
+
+ ---- Filter/grating information ----
+FILTER = / Name of the filter
+FILTPOS = / Filter position
+GRATING = / Name of the second disperser
+GRATPOS = / disperser position
+LINSPOS = / Linear Stage
+
+ ---- Exposure-related information ----
+EXPTIME = / Exposure time in seconds
+SHUTTIME= / Shutter exposure time in seconds
+DARKTIME= / Dark time in seconds
+
+ ---- Header information ----
+FILENAME= / Original file name
+HEADVER = / Version of header
+
+ ---- Checksums ----
+CHECKSUM= / checksum for the current HDU
+DATASUM = / checksum of the data records\n"""
+
+ lines = header_template.splitlines()
+ hdr = FITSHDR()
+ for line in lines:
+ hdr.add_record(line)
+
+
+def test_corrupt_continue():
+ """
+ test with corrupt continue, just make sure it doesn't crash
+ """
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+ with warnings.catch_warnings(record=True) as _:
+
+ hdr_from_cards = FITSHDR([
+ "IVAL = 35 / integer value ", # noqa
+ "SHORTS = 'hello world' ", # noqa
+ "CONTINUE= ' ' / '&' / Current observing orogram ", # noqa
+ "UND = ", # noqa
+ "DBL = 1.25 ", # noqa
+ ])
+
+ with FITS(fname, 'rw') as fits:
+ fits.write(None, header=hdr_from_cards)
+
+ read_header(fname)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+ with warnings.catch_warnings(record=True) as _:
+
+ hdr_from_cards = FITSHDR([
+ "IVAL = 35 / integer value ", # noqa
+ "SHORTS = 'hello world' ", # noqa
+ "PROGRAM = 'Setting the Scale: Determining the Absolute Mass Normalization and &'", # noqa
+ "CONTINUE 'Scaling Relations for Clusters at z~0.1&' ", # noqa
+ "CONTINUE '&' / Current observing orogram ", # noqa
+ "UND = ", # noqa
+ "DBL = 1.25 ", # noqa
+ ])
+
+ with FITS(fname, 'rw') as fits:
+ fits.write(None, header=hdr_from_cards)
+
+ read_header(fname)
+
+
+def record_exists(header_records, key, value):
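+    """
+    return True if a record with the given name and value is present
+    """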
+ for rec in header_records:
+ if rec['name'] == key and rec['value'] == value:
+ return True
+
+ return False
+
+
+def test_read_comment_history():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ data = np.arange(100).reshape(10, 10)
+ fits.create_image_hdu(data)
+ hdu = fits[-1]
+ hdu.write_comment('A COMMENT 1')
+ hdu.write_comment('A COMMENT 2')
+ hdu.write_history('SOME HISTORY 1')
+ hdu.write_history('SOME HISTORY 2')
+ fits.close()
+
+ with FITS(fname, 'r') as fits:
+ hdu = fits[-1]
+ header = hdu.read_header()
+ records = header.records()
+ assert record_exists(records, 'COMMENT', 'A COMMENT 1')
+ assert record_exists(records, 'COMMENT', 'A COMMENT 2')
+ assert record_exists(records, 'HISTORY', 'SOME HISTORY 1')
+ assert record_exists(records, 'HISTORY', 'SOME HISTORY 2')
+
+
+def test_write_key_dict():
+ """
+ test that write_key works using a standard key dict
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+ with FITS(fname, 'rw') as fits:
+
+ im = np.zeros((10, 10), dtype='i2')
+ fits.write(im)
+
+ keydict = {
+ 'name': 'test',
+ 'value': 35,
+ 'comment': 'keydict test',
+ }
+ fits[-1].write_key(**keydict)
+
+ h = fits[-1].read_header()
+
+ assert h['test'] == keydict['value']
+ assert h.get_comment('test') == keydict['comment']
+
+
+if __name__ == '__main__':
+ test_header_write_read()
--- /dev/null
+import os
+import tempfile
+import pytest
+from ..fitslib import read_header, FITS
+from ..fits_exceptions import FITSFormatError
+
+
+def test_header_junk():
+ """
+ test lenient treatment of garbage written by IDL mwrfits
+ """
+
+ data=b"""SIMPLE = T /Primary Header created by MWRFITS v1.11 BITPIX = 16 / NAXIS = 0 / EXTEND = T /Extensions may be present BLAT = 1 /integer FOO = 1.00000 /float (or double?) BAR@ = NAN /float NaN BI.Z = NaN /double NaN BAT = INF /1.0 / 0.0 BOO = -INF /-1.0 / 0.0 QUAT = ' ' /blank string QUIP = '1.0 ' /number in quotes QUIZ = ' 1.0 ' /number in quotes with a leading space QUI\xf4\x04 = 'NaN ' /NaN in quotes HIERARCH QU.@D = 'Inf ' END """ # noqa
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with open(fname, 'wb') as fobj:
+ fobj.write(data)
+
+ h = read_header(fname)
+ # these keys are not hierarch but we can parse the name and then
+ # leave the value as a string, so we do that.
+ assert h['bar@'] == 'NAN', 'NAN garbage'
+ assert h['bi.z'] == 'NaN', 'NaN garbage'
+ assert h['bat'] == 'INF', 'INF garbage'
+ assert h['boo'] == '-INF', '-INF garbage'
+ assert h['quat'] == '', 'blank'
+ assert h['quip'] == '1.0', '1.0 in quotes'
+ assert h['quiz'] == ' 1.0', '1.0 in quotes'
+ # the key in the header is 'QUI' + two non-ascii chars and gets
+ # translated to `QUI__`
+ assert h['qui__'] == 'NaN', 'NaN in quotes'
+ # this key is `HIERARCH QU.@D` in the header and so gets read as is
+ assert h['qu.@d'] == 'Inf', 'Inf in quotes'
+
+
+def test_header_junk_non_ascii():
+ data = b"SIMPLE = T / file does conform to FITS standard BITPIX = 16 / number of bits per data pixel NAXIS = 0 / number of data axes EXTEND = T / FITS dataset may contain extensions COMMENT FITS (Flexible Image Transport System) format is defined in 'AstronomyCOMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H @\x0f@\x0f \x02\x05\x18@\x02\x02\xc5@\x0c\x03\xf3@\x080\x02\x03\xbc@\x0f@@@@@@@@ END " # noqa
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+ with open(fname, 'wb') as fobj:
+ fobj.write(data)
+
+ h = read_header(fname)
+ assert h["@_@_"] is None
+
+
+def test_missing_xtension_keyword():
+ """
+ Misformatted header with extension not properly marked with
+ XTENSION
+ """
+
+ data=b"""SIMPLE = T / This is a FITS file BITPIX = 8 / NAXIS = 0 / EXTEND = T / This file may contain FITS extensions NEXTEND = 7 / Number of extensions END SIMPLE = T / file does conform to FITS standard BITPIX = 32 / number of bits per data pixel NAXIS = 2 / number of data axes NAXIS1 = 30 / length of data axis 1 NAXIS2 = 30 / length of data axis 2 EXTEND = T / FITS dataset may contain extensions END """ # noqa
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with open(fname, 'wb') as fobj:
+ fobj.write(data)
+
+ with pytest.raises(FITSFormatError):
+ with FITS(fname) as fits:
+ print(fits)
--- /dev/null
+import os
+import tempfile
+# import warnings
+from .checks import check_header, compare_array
+import numpy as np
+from ..fitslib import FITS
+
+DTYPES = ['u1', 'i1', 'u2', 'i2', '<u4', 'i4', 'i8', '>f4', 'f8']
+
+
+def test_image_write_read():
+ """
+ Test a basic image write, data and a header, then reading back in to
+ check the values
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+ with FITS(fname, 'rw') as fits:
+
+ # note mixing up byte orders a bit
+ for dtype in DTYPES:
+ data = np.arange(5*20, dtype=dtype).reshape(5, 20)
+ header = {'DTYPE': dtype, 'NBYTES': data.dtype.itemsize}
+ fits.write_image(data, header=header)
+ rdata = fits[-1].read()
+
+ compare_array(data, rdata, "images")
+
+ rh = fits[-1].read_header()
+ check_header(header, rh)
+
+ with FITS(fname) as fits:
+ for i in range(len(DTYPES)):
+ assert not fits[i].is_compressed(), 'not compressed'
+
+
+def test_image_write_empty():
+ """
+ Test a basic image write, with no data and just a header, then reading
+ back in to check the values
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ data = None
+
+ header = {
+ 'EXPTIME': 120,
+ 'OBSERVER': 'Beatrice Tinsley',
+ 'INSTRUME': 'DECam',
+ 'FILTER': 'r',
+ }
+ ccds = ['CCD1', 'CCD2', 'CCD3', 'CCD4', 'CCD5', 'CCD6', 'CCD7', 'CCD8']
+ with FITS(fname, 'rw', ignore_empty=True) as fits:
+ for extname in ccds:
+                fits.write_image(data, header=header, extname=extname)
+ _ = fits[-1].read()
+ rh = fits[-1].read_header()
+ check_header(header, rh)
+
+
+def test_image_write_read_from_dims():
+ """
+ Test creating an image from dims and writing in place
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ # note mixing up byte orders a bit
+ for dtype in DTYPES:
+ data = np.arange(5*20, dtype=dtype).reshape(5, 20)
+
+ fits.create_image_hdu(dims=data.shape, dtype=data.dtype)
+
+ fits[-1].write(data)
+ rdata = fits[-1].read()
+
+ compare_array(data, rdata, "images")
+
+ with FITS(fname) as fits:
+ for i in range(len(DTYPES)):
+ assert not fits[i].is_compressed(), "not compressed"
+
+
+def test_image_write_read_from_dims_chunks():
+ """
+ Test creating an image and reading/writing chunks
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ # note mixing up byte orders a bit
+ for dtype in DTYPES:
+ data = np.arange(5*3, dtype=dtype).reshape(5, 3)
+
+ fits.create_image_hdu(dims=data.shape, dtype=data.dtype)
+
+ chunk1 = data[0:2, :]
+ chunk2 = data[2:, :]
+
+ #
+ # first using scalar pixel offset
+ #
+
+ fits[-1].write(chunk1)
+
+ start = chunk1.size
+ fits[-1].write(chunk2, start=start)
+
+ rdata = fits[-1].read()
+
+ compare_array(data, rdata, "images")
+
+ #
+ # now using sequence, easier to calculate
+ #
+
+ fits.create_image_hdu(dims=data.shape,
+ dtype=data.dtype)
+
+ # first using pixel offset
+ fits[-1].write(chunk1)
+
+ start = [2, 0]
+ fits[-1].write(chunk2, start=start)
+
+ rdata2 = fits[-1].read()
+
+ compare_array(data, rdata2, "images")
+
+ with FITS(fname) as fits:
+ for i in range(len(DTYPES)):
+ assert not fits[i].is_compressed(), "not compressed"
+
+
+def test_image_slice():
+ """
+ test reading an image slice
+ """
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ # note mixing up byte orders a bit
+ for dtype in DTYPES:
+ data = np.arange(16*20, dtype=dtype).reshape(16, 20)
+ header = {'DTYPE': dtype, 'NBYTES': data.dtype.itemsize}
+
+ fits.write_image(data, header=header)
+ rdata = fits[-1][4:12, 9:17]
+
+ compare_array(data[4:12, 9:17], rdata, "images")
+
+ rh = fits[-1].read_header()
+ check_header(header, rh)
+
+
+def _check_shape(expected_data, rdata):
+ mess = (
+ 'Data are not the same (Expected shape: %s, '
+ 'actual shape: %s.' % (expected_data.shape, rdata.shape)
+ )
+ np.testing.assert_array_equal(expected_data, rdata, mess)
+
+
+def test_read_flip_axis_slice():
+ """
+    Test reading a slice when the slice's start is greater than its stop.
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ dtype = np.int16
+ data = np.arange(100 * 200, dtype=dtype).reshape(100, 200)
+ fits.write_image(data)
+ hdu = fits[-1]
+ rdata = hdu[:, 130:70]
+
+            # a slice with start > stop reads the axis in reverse,
+            # equivalent to a numpy step of -1
+            expected_data = data[:, 130:70:-1]
+
+ _check_shape(expected_data, rdata)
+
+ rdata = hdu[:, 130:70:-6]
+ expected_data = data[:, 130:70:-6]
+ _check_shape(expected_data, rdata)
+
+ # Positive step integer with start > stop will return an empty
+ # array
+ rdata = hdu[:, 90:60:4]
+ expected_data = np.empty(0, dtype=dtype)
+ _check_shape(expected_data, rdata)
+
+ # Negative step integer with start < stop will return an empty
+ # array.
+ rdata = hdu[:, 60:90:-4]
+ expected_data = np.empty(0, dtype=dtype)
+ _check_shape(expected_data, rdata)
+
+
+def test_image_slice_striding():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ # note mixing up byte orders a bit
+ for dtype in DTYPES:
+ data = np.arange(16*20, dtype=dtype).reshape(16, 20)
+ header = {'DTYPE': dtype, 'NBYTES': data.dtype.itemsize}
+ fits.write_image(data, header=header)
+
+ rdata = fits[-1][4:16:4, 2:20:2]
+ expected_data = data[4:16:4, 2:20:2]
+ assert rdata.shape == expected_data.shape, (
+ "Shapes differ with dtype %s" % dtype
+ )
+ compare_array(
+ expected_data, rdata, "images with dtype %s" % dtype
+ )
+
+
+def test_read_ignore_scaling():
+ """
+ Test the flag to ignore scaling when reading an HDU.
+ """
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ dtype = 'i2'
+ data = np.arange(10 * 20, dtype=dtype).reshape(10, 20)
+ header = {
+ 'DTYPE': dtype,
+ 'BITPIX': 16,
+ 'NBYTES': data.dtype.itemsize,
+ 'BZERO': 9.33,
+ 'BSCALE': 3.281
+ }
+
+ fits.write_image(data, header=header)
+ hdu = fits[-1]
+
+ rdata = hdu.read()
+ assert rdata.dtype == np.float32, 'Wrong dtype.'
+
+ hdu.ignore_scaling = True
+ rdata = hdu[:, :]
+ assert rdata.dtype == dtype, 'Wrong dtype when ignoring.'
+ np.testing.assert_array_equal(
+ data, rdata, err_msg='Wrong unscaled data.'
+ )
+
+ rh = fits[-1].read_header()
+ check_header(header, rh)
+
+ hdu.ignore_scaling = False
+ rdata = hdu[:, :]
+ assert rdata.dtype == np.float32, (
+ 'Wrong dtype when not ignoring.'
+ )
+ np.testing.assert_array_equal(
+ data.astype(np.float32), rdata,
+ err_msg='Wrong scaled data returned.'
+ )
--- /dev/null
+import pytest
+import sys
+import os
+import tempfile
+from .checks import (
+ # check_header,
+ compare_array,
+ compare_array_abstol,
+)
+import numpy as np
+from ..fitslib import (
+ FITS,
+ read,
+ write,
+)
+
+
+@pytest.mark.parametrize(
+ 'compress',
+ [
+ 'rice',
+ 'hcompress',
+ 'plio',
+ 'gzip',
+ 'gzip_2',
+ 'gzip_lossless',
+ 'gzip_2_lossless',
+ ]
+)
+def test_compressed_write_read(compress):
+ """
+ Test writing and reading a compressed image for each compression type
+ """
+ nrows = 5
+ ncols = 20
+ if compress in ['rice', 'hcompress'] or 'gzip' in compress:
+ dtypes = ['u1', 'i1', 'u2', 'i2', 'u4', 'i4', 'f4', 'f8']
+ elif compress == 'plio':
+ dtypes = ['i1', 'i2', 'i4', 'f4', 'f8']
+ else:
+ raise ValueError('unexpected compress %s' % compress)
+
+ if 'lossless' in compress:
+ qlevel = None
+ else:
+ qlevel = 16
+
+ seed = 1919
+ rng = np.random.RandomState(seed)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ for ext, dtype in enumerate(dtypes):
+ if dtype[0] == 'f':
+ data = rng.normal(size=(nrows, ncols))
+ if compress == 'plio':
+ data = data.clip(min=0)
+ data = data.astype(dtype)
+ else:
+ data = np.arange(
+ nrows * ncols, dtype=dtype,
+ ).reshape(nrows, ncols)
+
+ csend = compress.replace('_lossless', '')
+ write(fname, data, compress=csend, qlevel=qlevel)
+ rdata = read(fname, ext=ext+1)
+
+ if 'lossless' in compress or dtype[0] in ['i', 'u']:
+ compare_array(
+ data, rdata,
+ "%s compressed images ('%s')" % (compress, dtype)
+ )
+ else:
+ # lossy floating point
+ compare_array_abstol(
+ data,
+ rdata,
+ 0.2,
+ "%s compressed images ('%s')" % (compress, dtype),
+ )
+
+ with FITS(fname) as fits:
+ for ii in range(len(dtypes)):
+ i = ii + 1
+ assert fits[i].is_compressed(), "is compressed"
+
+
+@pytest.mark.parametrize(
+ 'compress',
+ [
+ 'rice',
+ 'hcompress',
+ 'plio',
+ 'gzip',
+ 'gzip_2',
+ 'gzip_lossless',
+ 'gzip_2_lossless',
+ ]
+)
+def test_compressed_write_read_fitsobj(compress):
+ """
+ Test writing and reading a compressed image for each compression type
+
+ In this version, keep the fits object open
+ """
+ nrows = 5
+ ncols = 20
+ if compress in ['rice', 'hcompress'] or 'gzip' in compress:
+ dtypes = ['u1', 'i1', 'u2', 'i2', 'u4', 'i4', 'f4', 'f8']
+ elif compress == 'plio':
+ dtypes = ['i1', 'i2', 'i4', 'f4', 'f8']
+ else:
+ raise ValueError('unexpected compress %s' % compress)
+
+ if 'lossless' in compress:
+ qlevel = None
+ else:
+ qlevel = 16
+
+ seed = 1919
+ rng = np.random.RandomState(seed)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ # note i8 not supported for compressed!
+
+ for dtype in dtypes:
+ if dtype[0] == 'f':
+ data = rng.normal(size=(nrows, ncols))
+ if compress == 'plio':
+ data = data.clip(min=0)
+ data = data.astype(dtype)
+ else:
+ data = np.arange(
+ nrows * ncols, dtype=dtype,
+ ).reshape(nrows, ncols)
+
+ csend = compress.replace('_lossless', '')
+ fits.write_image(data, compress=csend, qlevel=qlevel)
+ rdata = fits[-1].read()
+
+ if 'lossless' in compress or dtype[0] in ['i', 'u']:
+ # for integers we have chosen a wide range of values, so
+ # there will be no quantization and we expect no
+ # information loss
+ compare_array(
+ data, rdata,
+ "%s compressed images ('%s')" % (compress, dtype)
+ )
+ else:
+ # lossy floating point
+ compare_array_abstol(
+ data,
+ rdata,
+ 0.2,
+ "%s compressed images ('%s')" % (compress, dtype),
+ )
+
+ with FITS(fname) as fits:
+ for ii in range(len(dtypes)):
+ i = ii + 1
+ assert fits[i].is_compressed(), "is compressed"
+
+
+@pytest.mark.skipif(sys.version_info < (3, 9),
+ reason='importlib bug in 3.8')
+def test_gzip_tile_compressed_read_lossless_astropy():
+ """
+ Test reading an image gzip compressed by astropy (fixed by cfitsio 3.49)
+ """
+ import importlib.resources
+ ref = importlib.resources.files("fitsio") / 'test_images' / 'test_gzip_compressed_image.fits.fz' # noqa
+ with importlib.resources.as_file(ref) as gzip_file:
+ data = read(gzip_file)
+
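+ # the bundled reference image is expected to be all zeros, so the
+ # losslessly decompressed data should equal a zero array of the same shape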
+ compare_array(data, data*0.0, "astropy lossless compressed image")
+
+
+def test_compress_preserve_zeros():
+ """
+ Test writing and reading gzip compressed image
+ """
+
+ zinds = [
+ (1, 3),
+ (2, 9),
+ ]
+
+ dtypes = ['f4', 'f8']
+
+ seed = 2020
+ rng = np.random.RandomState(seed)
+
+ # Do not test hcompress as it doesn't support SUBTRACTIVE_DITHER_2
+ for compress in ['gzip', 'gzip_2', 'rice']:
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ for dtype in dtypes:
+
+ data = rng.normal(size=5*20).reshape(5, 20).astype(dtype)
+ for zind in zinds:
+ data[zind[0], zind[1]] = 0.0
+
+ fits.write_image(
+ data,
+ compress=compress,
+ qlevel=16,
+ qmethod='SUBTRACTIVE_DITHER_2',
+ )
+ rdata = fits[-1].read()
+
+ for zind in zinds:
+ assert rdata[zind[0], zind[1]] == 0.0
+
+
+@pytest.mark.parametrize(
+ 'compress',
+ [
+ 'rice',
+ 'hcompress',
+ 'plio',
+ ]
+)
+@pytest.mark.parametrize(
+ 'seed_type',
+ ['matched', 'unmatched', 'checksum', 'checksum_int'],
+)
+@pytest.mark.parametrize(
+ 'use_fits_object',
+ [False, True],
+)
+@pytest.mark.parametrize(
+ 'dtype',
+ ['f4', 'f8'],
+)
+def test_compressed_seed(compress, seed_type, use_fits_object, dtype):
+ """
+ Test that the dither seed controls reproducibility of lossy compression
+ """
+ nrows = 5
+ ncols = 20
+
+ qlevel = 16
+
+ seed = 1919
+ rng = np.random.RandomState(seed)
+
+ if seed_type == 'matched':
+ dither_seed1 = 9881
+ dither_seed2 = 9881
+ elif seed_type == 'unmatched':
+ dither_seed1 = 3
+ dither_seed2 = 4
+ elif seed_type == 'checksum':
+ dither_seed1 = 'checksum'
+ dither_seed2 = b'checksum'
+ elif seed_type == 'checksum_int':
+ dither_seed1 = -1
+ # any negative means use checksum
+ dither_seed2 = -3
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname1 = os.path.join(tmpdir, 'test1.fits')
+ fname2 = os.path.join(tmpdir, 'test2.fits')
+
+ data = rng.normal(size=(nrows, ncols))
+ if compress == 'plio':
+ data = data.clip(min=0)
+ data = data.astype(dtype)
+
+ if use_fits_object:
+ with FITS(fname1, 'rw') as fits1:
+ fits1.write(
+ data, compress=compress, qlevel=qlevel,
+ dither_seed=dither_seed1,
+ )
+ rdata1 = fits1[-1].read()
+
+ with FITS(fname2, 'rw') as fits2:
+ fits2.write(
+ data, compress=compress, qlevel=qlevel,
+ dither_seed=dither_seed2,
+ )
+ rdata2 = fits2[-1].read()
+ else:
+ write(
+ fname1, data, compress=compress, qlevel=qlevel,
+ dither_seed=dither_seed1,
+ )
+ rdata1 = read(fname1)
+
+ write(
+ fname2, data, compress=compress, qlevel=qlevel,
+ dither_seed=dither_seed2,
+ )
+ rdata2 = read(fname2)
+
+ mess = "%s compressed images ('%s')" % (compress, dtype)
+
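+ # matched or checksum-derived seeds give identical dither patterns, so
+ # the lossy outputs should agree exactly; unmatched seeds should differ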
+ if seed_type in ['checksum', 'checksum_int', 'matched']:
+ assert np.all(rdata1 == rdata2), mess
+ else:
+ assert np.all(rdata1 != rdata2), mess
+
+
+@pytest.mark.parametrize(
+ 'dither_seed',
+ ['blah', 10_001],
+)
+def test_compressed_seed_bad(dither_seed):
+ """
+ Test that invalid dither_seed values raise ValueError
+ """
+ compress = 'rice'
+ dtype = 'f4'
+ nrows = 5
+ ncols = 20
+
+ qlevel = 16
+
+ seed = 1919
+ rng = np.random.RandomState(seed)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ data = rng.normal(size=(nrows, ncols))
+ data = data.astype(dtype)
+
+ with pytest.raises(ValueError):
+ write(
+ fname, data, compress=compress, qlevel=qlevel,
+ dither_seed=dither_seed,
+ )
+
+
+if __name__ == '__main__':
+ test_compressed_seed(
+ compress='rice',
+ seed_type='unmatched',
+ use_fits_object=True,
+ dtype='f4',
+ )
--- /dev/null
+import os
+import tempfile
+import numpy as np
+from ..fitslib import FITS, read_header
+from .checks import compare_array, compare_rec
+
+
+def test_move_by_name():
+ """
+ test moving hdus by name
+ """
+
+ nrows = 3
+
+ seed = 1234
+ rng = np.random.RandomState(seed)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ data1 = np.zeros(nrows, dtype=[('ra', 'f8'), ('dec', 'f8')])
+ data1['ra'] = rng.uniform(size=nrows)
+ data1['dec'] = rng.uniform(size=nrows)
+ fits.write_table(data1, extname='mytable')
+
+ fits[-1].write_key('EXTVER', 1)
+
+ data2 = np.zeros(nrows, dtype=[('ra', 'f8'), ('dec', 'f8')])
+ data2['ra'] = rng.uniform(size=nrows)
+ data2['dec'] = rng.uniform(size=nrows)
+
+ fits.write_table(data2, extname='mytable')
+ fits[-1].write_key('EXTVER', 2)
+
+ hdunum1 = fits.movnam_hdu('mytable', extver=1)
+ assert hdunum1 == 2
+ hdunum2 = fits.movnam_hdu('mytable', extver=2)
+ assert hdunum2 == 3
+
+
+def test_ext_ver():
+ """
+ Test using extname and extver, all combinations I can think of
+ """
+
+ seed = 9889
+ rng = np.random.RandomState(seed)
+
+ dtype = [('num', 'i4'), ('ra', 'f8'), ('dec', 'f8')]
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ img1 = np.arange(2*3, dtype='i4').reshape(2, 3) + 5
+ img2 = np.arange(2*3, dtype='i4').reshape(2, 3) + 6
+ img3 = np.arange(2*3, dtype='i4').reshape(2, 3) + 7
+
+ nrows = 3
+ data1 = np.zeros(nrows, dtype=dtype)
+
+ data1['num'] = 1
+ data1['ra'] = rng.uniform(size=nrows)
+ data1['dec'] = rng.uniform(size=nrows)
+
+ data2 = np.zeros(nrows, dtype=dtype)
+
+ data2['num'] = 2
+ data2['ra'] = rng.uniform(size=nrows)
+ data2['dec'] = rng.uniform(size=nrows)
+
+ data3 = np.zeros(nrows, dtype=dtype)
+ data3['num'] = 3
+ data3['ra'] = rng.uniform(size=nrows)
+ data3['dec'] = rng.uniform(size=nrows)
+
+ hdr1 = {'k1': 'key1'}
+ hdr2 = {'k2': 'key2'}
+
+ fits.write_image(img1, extname='myimage', header=hdr1, extver=1)
+ fits.write_table(data1)
+ fits.write_table(data2, extname='mytable', extver=1)
+ fits.write_image(img2, extname='myimage', header=hdr2, extver=2)
+ fits.write_table(data3, extname='mytable', extver=2)
+ fits.write_image(img3)
+
+ d1 = fits[1].read()
+ d2 = fits['mytable'].read()
+ d2b = fits['mytable', 1].read()
+ d3 = fits['mytable', 2].read()
+
+ compare_rec(data1, d1, "data1")
+ compare_rec(data2, d2, "data2")
+ compare_rec(data2, d2b, "data2b")
+ compare_rec(data3, d3, "data3")
+
+ dimg1 = fits[0].read()
+ dimg1b = fits['myimage', 1].read()
+ dimg2 = fits['myimage', 2].read()
+ dimg3 = fits[5].read()
+
+ compare_array(img1, dimg1, "img1")
+ compare_array(img1, dimg1b, "img1b")
+ compare_array(img2, dimg2, "img2")
+ compare_array(img3, dimg3, "img3")
+
+ rhdr1 = read_header(fname, ext='myimage', extver=1)
+ rhdr2 = read_header(fname, ext='myimage', extver=2)
+ assert 'k1' in rhdr1, 'testing k1 in header version 1'
+ assert 'k2' in rhdr2, 'testing k2 in header version 2'
--- /dev/null
+import pytest
+import numpy as np
+import os
+import tempfile
+from .checks import (
+ compare_names,
+ compare_array,
+ compare_array_tol,
+ compare_object_array,
+ compare_rec,
+ compare_headerlist_header,
+ compare_rec_with_var,
+ compare_rec_subrows,
+)
+from .makedata import make_data
+from ..fitslib import FITS, write, read
+from .. import util
+
+DTYPES = ['u1', 'i1', 'u2', 'i2', '<u4', 'i4', 'i8', '>f4', 'f8']
+
+
+def test_table_read_write():
+
+ adata = make_data()
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ fits.write_table(
+ adata['data'], header=adata['keys'], extname='mytable'
+ )
+
+ d = fits[1].read()
+ compare_rec(adata['data'], d, "table read/write")
+
+ h = fits[1].read_header()
+ compare_headerlist_header(adata['keys'], h)
+
+ # see if our convenience functions are working
+ write(
+ fname,
+ adata['data2'],
+ extname="newext",
+ header={'ra': 335.2, 'dec': -25.2},
+ )
+ d = read(fname, ext='newext')
+ compare_rec(adata['data2'], d, "table data2")
+
+ # now test read_column
+ with FITS(fname) as fits:
+
+ for f in adata['data'].dtype.names:
+ d = fits[1].read_column(f)
+ compare_array(
+ adata['data'][f], d, "table 1 single field read '%s'" % f
+ )
+
+ for f in adata['data2'].dtype.names:
+ d = fits['newext'].read_column(f)
+ compare_array(
+ adata['data2'][f], d, "table 2 single field read '%s'" % f
+ )
+
+ # now list of columns
+ for cols in [['u2scalar', 'f4vec', 'Sarr'],
+ ['f8scalar', 'u2arr', 'Sscalar']]:
+ d = fits[1].read(columns=cols)
+ for f in d.dtype.names:
+ compare_array(
+ adata['data'][f][:], d[f], "test column list %s" % f
+ )
+
+ for rows in [[1, 3], [3, 1], [2, 2, 1]]:
+ d = fits[1].read(columns=cols, rows=rows)
+ for col in d.dtype.names:
+ compare_array(
+ adata['data'][col][rows], d[col],
+ "test column list %s row subset" % col
+ )
+ for col in cols:
+ d = fits[1].read_column(col, rows=rows)
+ compare_array(
+ adata['data'][col][rows], d,
+ "test column list %s row subset" % col
+ )
+
+
+def test_table_column_index_scalar():
+ """
+ Test that indexing a table column with a scalar row index returns a
+ zero-dimensional result
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ data = np.empty(1, dtype=[('Z', 'f8')])
+ data['Z'][:] = 1.0
+ fits.write_table(data)
+ fits.write_table(data)
+
+ with FITS(fname, 'r') as fits:
+ assert fits[1]['Z'][0].ndim == 0
+ assert fits[1][0].ndim == 0
+
+
+def test_table_read_empty_rows():
+ """
+ test reading an empty list of rows from a table.
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ data = np.empty(1, dtype=[('Z', 'f8')])
+ data['Z'][:] = 1.0
+ fits.write_table(data)
+ fits.write_table(data)
+
+ with FITS(fname, 'r') as fits:
+ assert len(fits[1].read(rows=[])) == 0
+ assert len(fits[1].read(rows=range(0, 0))) == 0
+ assert len(fits[1].read(rows=np.arange(0, 0))) == 0
+
+
+def test_table_format_column_subset():
+ """
+ Test the string format of a column subset, making sure a column is not
+ duplicated when its name is a substring of another column's name
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ data = np.empty(1, dtype=[('Z', 'f8'), ('Z_PERSON', 'f8')])
+ data['Z'][:] = 1.0
+ data['Z_PERSON'][:] = 1.0
+ fits.write_table(data)
+ fits.write_table(data)
+ fits.write_table(data)
+
+ with FITS(fname, 'r') as fits:
+ # assert we do not have an extra row of 'Z'
+ sz = str(fits[2]['Z_PERSON']).split('\n')
+ s = str(fits[2][('Z_PERSON', 'Z')]).split('\n')
+ assert len(sz) == len(s) - 1
+
+
+def test_table_write_dict_of_arrays_scratch():
+
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ d = {}
+ for n in data.dtype.names:
+ d[n] = data[n]
+
+ fits.write(d)
+
+ d = read(fname)
+ compare_rec(data, d, "list of dicts, scratch")
+
+
+def test_table_write_dict_of_arrays():
+
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ fits.create_table_hdu(data, extname='mytable')
+
+ d = {}
+ for n in data.dtype.names:
+ d[n] = data[n]
+
+ fits[-1].write(d)
+
+ d = read(fname)
+ compare_rec(data, d, "list of dicts")
+
+
+def test_table_write_dict_of_arrays_var():
+ """
+ This version creates the table from a dict of arrays, with
+ variable-length columns
+ """
+
+ adata = make_data()
+ vardata = adata['vardata']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ d = {}
+ for n in vardata.dtype.names:
+ d[n] = vardata[n]
+
+ fits.write(d)
+
+ d = read(fname)
+ compare_rec_with_var(vardata, d, "dict of arrays, var")
+
+
+def test_table_write_list_of_arrays_scratch():
+ """
+ This version creates the table from names and a list of arrays,
+ writing from scratch
+ """
+
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ names = [n for n in data.dtype.names]
+ dlist = [data[n] for n in data.dtype.names]
+ fits.write(dlist, names=names)
+
+ d = read(fname)
+ compare_rec(data, d, "list of arrays, scratch")
+
+
+def test_table_write_list_of_arrays():
+
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ fits.create_table_hdu(data, extname='mytable')
+
+ columns = [n for n in data.dtype.names]
+ dlist = [data[n] for n in data.dtype.names]
+ fits[-1].write(dlist, columns=columns)
+
+ d = read(fname, ext='mytable')
+ compare_rec(data, d, "list of arrays")
+
+
+def test_table_write_list_of_arrays_var():
+ """
+ This version creates the table from names and a list of arrays, with
+ variable-length columns
+ """
+ adata = make_data()
+ vardata = adata['vardata']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ names = [n for n in vardata.dtype.names]
+ dlist = [vardata[n] for n in vardata.dtype.names]
+ fits.write(dlist, names=names)
+
+ d = read(fname)
+ compare_rec_with_var(vardata, d, "list of arrays, var")
+
+
+def test_table_write_bad_string():
+
+ for d in ['S0', 'U0']:
+ dt = [('s', d)]
+
+ # old numpy did not allow zero-sized string dtypes and raises
+ # a TypeError when constructing them
+ try:
+ data = np.zeros(1, dtype=dt)
+ supported = True
+ except TypeError:
+ supported = False
+
+ if supported:
+ with pytest.raises(ValueError):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+ with FITS(fname, 'rw') as fits:
+ fits.write(data)
+
+
+def test_variable_length_columns():
+
+ adata = make_data()
+ vardata = adata['vardata']
+
+ for vstorage in ['fixed', 'object']:
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw', vstorage=vstorage) as fits:
+ fits.write(vardata)
+
+ # reading multiple columns
+ d = fits[1].read()
+ compare_rec_with_var(
+ vardata, d, "read all test '%s'" % vstorage
+ )
+
+ cols = ['u2scalar', 'Sobj']
+ d = fits[1].read(columns=cols)
+ compare_rec_with_var(
+ vardata, d, "read all test subcols '%s'" % vstorage
+ )
+
+ # one at a time
+ for f in vardata.dtype.names:
+ d = fits[1].read_column(f)
+ if util.is_object(vardata[f]):
+ compare_object_array(
+ vardata[f], d,
+ "read all field '%s'" % f
+ )
+
+ # same as above with slices
+ # reading multiple columns
+ d = fits[1][:]
+ compare_rec_with_var(
+ vardata, d, "read all test '%s'" % vstorage
+ )
+
+ d = fits[1][cols][:]
+ compare_rec_with_var(
+ vardata, d, "read all test subcols '%s'" % vstorage
+ )
+
+ # one at a time
+ for f in vardata.dtype.names:
+ d = fits[1][f][:]
+ if util.is_object(vardata[f]):
+ compare_object_array(
+ vardata[f], d,
+ "read all field '%s'" % f
+ )
+
+ #
+ # now same with sub rows
+ #
+
+ # reading multiple columns, sorted and unsorted
+ for rows in [[0, 2], [2, 0]]:
+ d = fits[1].read(rows=rows)
+ compare_rec_with_var(
+ vardata, d, "read subrows test '%s'" % vstorage,
+ rows=rows,
+ )
+
+ d = fits[1].read(columns=cols, rows=rows)
+ compare_rec_with_var(
+ vardata,
+ d,
+ "read subrows test subcols '%s'" % vstorage,
+ rows=rows,
+ )
+
+ # one at a time
+ for f in vardata.dtype.names:
+ d = fits[1].read_column(f, rows=rows)
+ if util.is_object(vardata[f]):
+ compare_object_array(
+ vardata[f], d,
+ "read subrows field '%s'" % f,
+ rows=rows,
+ )
+
+ # same as above with slices
+ # reading multiple columns
+ d = fits[1][rows]
+ compare_rec_with_var(
+ vardata, d, "read subrows slice test '%s'" % vstorage,
+ rows=rows,
+ )
+ d = fits[1][2:4]
+ compare_rec_with_var(
+ vardata,
+ d,
+ "read slice test '%s'" % vstorage,
+ rows=[2, 3],
+ )
+
+ d = fits[1][cols][rows]
+ compare_rec_with_var(
+ vardata,
+ d,
+ "read subcols subrows slice test '%s'" % vstorage,
+ rows=rows,
+ )
+
+ d = fits[1][cols][2:4]
+
+ compare_rec_with_var(
+ vardata,
+ d,
+ "read subcols slice test '%s'" % vstorage,
+ rows=[2, 3],
+ )
+
+ # one at a time
+ for f in vardata.dtype.names:
+ d = fits[1][f][rows]
+ if util.is_object(vardata[f]):
+ compare_object_array(
+ vardata[f], d,
+ "read subrows field '%s'" % f,
+ rows=rows,
+ )
+ d = fits[1][f][2:4]
+ if util.is_object(vardata[f]):
+ compare_object_array(
+ vardata[f], d,
+ "read slice field '%s'" % f,
+ rows=[2, 3],
+ )
+
+
+def test_table_iter():
+ """
+ Test iterating over rows of a table
+ """
+
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ fits.write_table(
+ data,
+ header=adata['keys'],
+ extname='mytable'
+ )
+
+ # one row at a time
+ with FITS(fname) as fits:
+ hdu = fits["mytable"]
+ i = 0
+ for row_data in hdu:
+ compare_rec(data[i], row_data, "table data")
+ i += 1
+
+
+def test_ascii_table_write_read():
+ """
+ Test write and read for an ascii table
+ """
+
+ adata = make_data()
+ ascii_data = adata['ascii_data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ fits.write_table(
+ ascii_data,
+ table_type='ascii',
+ header=adata['keys'],
+ extname='mytable',
+ )
+
+ # cfitsio always reports the types as i4 and f8, period, even if
+ # written with higher precision. Need to fix that somehow
+ for f in ascii_data.dtype.names:
+ d = fits[1].read_column(f)
+ if d.dtype == np.float64:
+ # note we should be able to do 1.11e-16 in principle, but
+ # in practice we get more like 2.15e-16
+ compare_array_tol(
+ ascii_data[f], d, 2.15e-16, "table field read '%s'" % f
+ )
+ else:
+ compare_array(
+ ascii_data[f], d, "table field read '%s'" % f
+ )
+
+ for rows in [[1, 3], [3, 1]]:
+ for f in ascii_data.dtype.names:
+ d = fits[1].read_column(f, rows=rows)
+ if d.dtype == np.float64:
+ compare_array_tol(ascii_data[f][rows], d, 2.15e-16,
+ "table field read subrows '%s'" % f)
+ else:
+ compare_array(ascii_data[f][rows], d,
+ "table field read subrows '%s'" % f)
+
+ beg = 1
+ end = 3
+ for f in ascii_data.dtype.names:
+ d = fits[1][f][beg:end]
+ if d.dtype == np.float64:
+ compare_array_tol(ascii_data[f][beg:end], d, 2.15e-16,
+ "table field read slice '%s'" % f)
+ else:
+ compare_array(ascii_data[f][beg:end], d,
+ "table field read slice '%s'" % f)
+
+ cols = ['i2scalar', 'f4scalar']
+ data = fits[1].read(columns=cols)
+ for f in data.dtype.names:
+ d = data[f]
+ if d.dtype == np.float64:
+ compare_array_tol(
+ ascii_data[f],
+ d,
+ 2.15e-16,
+ "table subcol, '%s'" % f
+ )
+ else:
+ compare_array(
+ ascii_data[f], d, "table subcol, '%s'" % f
+ )
+
+ data = fits[1][cols][:]
+ for f in data.dtype.names:
+ d = data[f]
+ if d.dtype == np.float64:
+ compare_array_tol(
+ ascii_data[f],
+ d,
+ 2.15e-16,
+ "table subcol, '%s'" % f
+ )
+ else:
+ compare_array(
+ ascii_data[f], d, "table subcol, '%s'" % f
+ )
+
+ for rows in [[1, 3], [3, 1]]:
+ data = fits[1].read(columns=cols, rows=rows)
+ for f in data.dtype.names:
+ d = data[f]
+ if d.dtype == np.float64:
+ compare_array_tol(ascii_data[f][rows], d, 2.15e-16,
+ "table subcol, '%s'" % f)
+ else:
+ compare_array(ascii_data[f][rows], d,
+ "table subcol, '%s'" % f)
+
+ data = fits[1][cols][rows]
+ for f in data.dtype.names:
+ d = data[f]
+ if d.dtype == np.float64:
+ compare_array_tol(ascii_data[f][rows], d, 2.15e-16,
+ "table subcol/row, '%s'" % f)
+ else:
+ compare_array(ascii_data[f][rows], d,
+ "table subcol/row, '%s'" % f)
+
+ data = fits[1][cols][beg:end]
+ for f in data.dtype.names:
+ d = data[f]
+ if d.dtype == np.float64:
+ compare_array_tol(ascii_data[f][beg:end], d, 2.15e-16,
+ "table subcol/slice, '%s'" % f)
+ else:
+ compare_array(ascii_data[f][beg:end], d,
+ "table subcol/slice, '%s'" % f)
+
+
+def test_table_insert_column():
+ """
+ Insert a new column
+ """
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ fits.write_table(data, header=adata['keys'], extname='mytable')
+
+ d = fits[1].read()
+
+ for n in d.dtype.names:
+ newname = n+'_insert'
+
+ fits[1].insert_column(newname, d[n])
+
+ newdata = fits[1][newname][:]
+
+ compare_array(
+ d[n],
+ newdata,
+ "table single field insert and read '%s'" % n
+ )
+
+
+def test_table_delete_row_range():
+ """
+ Test deleting a range of rows using the delete_rows method
+ """
+
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ fits.write_table(data)
+
+ rowslice = slice(1, 3)
+ with FITS(fname, 'rw') as fits:
+ fits[1].delete_rows(rowslice)
+
+ with FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = data[[0, 3]]
+ compare_rec(compare_data, d, "delete row range")
+
+
+def test_table_delete_rows():
+ """
+ Test deleting specific set of rows using the delete_rows method
+ """
+
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ fits.write_table(data)
+
+ rows2delete = [1, 3]
+ with FITS(fname, 'rw') as fits:
+ fits[1].delete_rows(rows2delete)
+
+ with FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = data[[0, 2]]
+ compare_rec(compare_data, d, "delete rows")
+
+
+def test_table_where():
+ """
+ Use the where method to get indices for a row filter expression
+ """
+
+ adata = make_data()
+ data2 = adata['data2']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ fits.write_table(data2)
+
+ #
+ # get all indices
+ #
+ with FITS(fname) as fits:
+ a = fits[1].where('x > 3 && y < 8')
+ b = np.where((data2['x'] > 3) & (data2['y'] < 8))[0]
+ np.testing.assert_array_equal(a, b)
+
+ #
+ # get slice of indices
+ #
+ with FITS(fname) as fits:
+ a = fits[1].where('x > 3 && y < 8', 2, 8)
+ b = np.where((data2['x'][2:8] > 3) & (data2['y'][2:8] < 8))[0]
+ np.testing.assert_array_equal(a, b)
+
+
+def test_table_resize():
+ """
+ Use the resize method to change the size of a table
+
+ default values get filled in and these are tested
+ """
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ #
+ # shrink from back
+ #
+ with FITS(fname, 'rw', clobber=True) as fits:
+ fits.write_table(data)
+
+ nrows = 2
+ with FITS(fname, 'rw') as fits:
+ fits[1].resize(nrows)
+
+ with FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = data[0:nrows]
+ compare_rec(compare_data, d, "shrink from back")
+
+ #
+ # shrink from front
+ #
+ with FITS(fname, 'rw', clobber=True) as fits:
+ fits.write_table(data)
+
+ with FITS(fname, 'rw') as fits:
+ fits[1].resize(nrows, front=True)
+
+ with FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = data[nrows-data.size:]
+ compare_rec(compare_data, d, "shrink from front")
+
+ # New rows are not zeroed; the signed/unsigned integer columns are
+ # filled with the FITS blank/offset values set below
+
+ nrows = 10
+ add_data = np.zeros(nrows-data.size, dtype=data.dtype)
+ add_data['i1scalar'] = -128
+ add_data['i1vec'] = -128
+ add_data['i1arr'] = -128
+ add_data['u2scalar'] = 32768
+ add_data['u2vec'] = 32768
+ add_data['u2arr'] = 32768
+ add_data['u4scalar'] = 2147483648
+ add_data['u4vec'] = 2147483648
+ add_data['u4arr'] = 2147483648
+
+ #
+ # expand at the back
+ #
+ with FITS(fname, 'rw', clobber=True) as fits:
+ fits.write_table(data)
+ with FITS(fname, 'rw') as fits:
+ fits[1].resize(nrows)
+
+ with FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = np.hstack((data, add_data))
+ compare_rec(compare_data, d, "expand at the back")
+
+ #
+ # expand at the front
+ #
+ with FITS(fname, 'rw', clobber=True) as fits:
+ fits.write_table(data)
+ with FITS(fname, 'rw') as fits:
+ fits[1].resize(nrows, front=True)
+
+ with FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = np.hstack((add_data, data))
+ compare_rec(compare_data, d, "expand at the front")
+
+
+def test_slice():
+ """
+ Test reading by slice
+ """
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ # initial write
+ fits.write_table(data)
+
+ # test reading single columns
+ for f in data.dtype.names:
+ d = fits[1][f][:]
+ compare_array(
+ data[f], d, "test read all rows %s column subset" % f
+ )
+
+ # test reading row subsets
+ rows = [1, 3]
+ for f in data.dtype.names:
+ d = fits[1][f][rows]
+ compare_array(data[f][rows], d, "test %s row subset" % f)
+ for f in data.dtype.names:
+ d = fits[1][f][1:3]
+ compare_array(data[f][1:3], d, "test %s row slice" % f)
+ for f in data.dtype.names:
+ d = fits[1][f][1:4:2]
+ compare_array(
+ data[f][1:4:2], d, "test %s row slice with step" % f
+ )
+ for f in data.dtype.names:
+ d = fits[1][f][::2]
+ compare_array(
+ data[f][::2], d, "test %s row slice with only step" % f
+ )
+
+ # now list of columns
+ cols = ['u2scalar', 'f4vec', 'Sarr']
+ d = fits[1][cols][:]
+ for f in d.dtype.names:
+ compare_array(data[f][:], d[f], "test column list %s" % f)
+
+ cols = ['u2scalar', 'f4vec', 'Sarr']
+ d = fits[1][cols][rows]
+ for f in d.dtype.names:
+ compare_array(
+ data[f][rows], d[f], "test column list %s row subset" % f
+ )
+
+ cols = ['u2scalar', 'f4vec', 'Sarr']
+ d = fits[1][cols][1:3]
+ for f in d.dtype.names:
+ compare_array(
+ data[f][1:3], d[f], "test column list %s row slice" % f
+ )
+
+
+def test_table_append():
+ """
+ Test creating a table and appending new rows.
+ """
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ # initial write
+ fits.write_table(data, header=adata['keys'], extname='mytable')
+ # now append
+ data2 = data.copy()
+ data2['f4scalar'] = 3
+ fits[1].append(data2)
+
+ d = fits[1].read()
+ assert d.size == data.size*2
+
+ compare_rec(data, d[0:data.size], "Comparing initial write")
+ compare_rec(data2, d[data.size:], "Comparing appended data")
+
+ h = fits[1].read_header()
+ compare_headerlist_header(adata['keys'], h)
+
+ # append with list of arrays and names
+ names = data.dtype.names
+ data3 = [np.array(data[name]) for name in names]
+ fits[1].append(data3, names=names)
+
+ d = fits[1].read()
+ assert d.size == data.size*3
+ compare_rec(data, d[2*data.size:], "Comparing appended data")
+
+ # append with list of arrays and columns
+ fits[1].append(data3, columns=names)
+
+ d = fits[1].read()
+ assert d.size == data.size*4
+ compare_rec(data, d[3*data.size:], "Comparing appended data")
+
+
+def test_table_subsets():
+ """
+ testing reading subsets
+ """
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ fits.write_table(data, header=adata['keys'], extname='mytable')
+
+ for rows in [[1, 3], [3, 1]]:
+ d = fits[1].read(rows=rows)
+ compare_rec_subrows(data, d, rows, "table subset")
+ columns = ['i1scalar', 'f4arr']
+ d = fits[1].read(columns=columns, rows=rows)
+
+ for f in columns:
+ d = fits[1].read_column(f, rows=rows)
+ compare_array(
+ data[f][rows], d, "row subset, multi-column '%s'" % f
+ )
+ for f in data.dtype.names:
+ d = fits[1].read_column(f, rows=rows)
+ compare_array(
+ data[f][rows], d, "row subset, column '%s'" % f
+ )
+
+
+def test_gz_write_read():
+ """
+ Test a basic table write, data and a header, to a gzipped file, then
+ reading back in to check the values
+
+ Also check that the file is non-zero size when done, since flushing
+ gzipped files has been a problem in the past
+ """
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits.gz')
+
+ with FITS(fname, 'rw') as fits:
+
+ fits.write_table(data, header=adata['keys'], extname='mytable')
+
+ d = fits[1].read()
+ compare_rec(data, d, "gzip write/read")
+
+ h = fits[1].read_header()
+ for entry in adata['keys']:
+ name = entry['name'].upper()
+ value = entry['value']
+ hvalue = h[name]
+ if isinstance(hvalue, str):
+ hvalue = hvalue.strip()
+ assert value == hvalue, "testing header key '%s'" % name
+
+ if 'comment' in entry:
+ assert (
+ entry['comment'].strip()
+ == h.get_comment(name).strip()
+ ), (
+ "testing comment for header key '%s'" % name
+ )
+
+ stat = os.stat(fname)
+ assert stat.st_size != 0, "Making sure the data was flushed to disk"
+
+
+@pytest.mark.skipif('SKIP_BZIP_TEST' in os.environ,
+ reason='SKIP_BZIP_TEST set')
+def test_bz2_read():
+ '''
+ Write a normal .fits file, run bzip2 on it, then read the bz2
+ file and verify that it's the same as what we put in; we don't
+ [currently support or] test *writing* bzip2.
+ '''
+
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ bzfname = fname + '.bz2'
+
+ try:
+ fits = FITS(fname, 'rw')
+ fits.write_table(data, header=adata['keys'], extname='mytable')
+ fits.close()
+
+ os.system('bzip2 %s' % fname)
+ f2 = FITS(bzfname)
+ d = f2[1].read()
+ compare_rec(data, d, "bzip2 read")
+
+ h = f2[1].read_header()
+ for entry in adata['keys']:
+ name = entry['name'].upper()
+ value = entry['value']
+ hvalue = h[name]
+ if isinstance(hvalue, str):
+ hvalue = hvalue.strip()
+
+ assert value == hvalue, "testing header key '%s'" % name
+
+ if 'comment' in entry:
+ assert (
+ entry['comment'].strip()
+ == h.get_comment(name).strip()
+ ), (
+ "testing comment for header key '%s'" % name
+ )
+ except Exception:
+ import traceback
+ traceback.print_exc()
+
+ assert False, 'Exception in testing bzip2 reading'
+
+
+def test_checksum():
+ """
+ test that checksumming works
+ """
+ adata = make_data()
+ data = adata['data']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ fits.write_table(data, header=adata['keys'], extname='mytable')
+ fits[1].write_checksum()
+ fits[1].verify_checksum()
+
+
+def test_trim_strings():
+ """
+ test mode where we trim strings on read
+ """
+
+ dt = [('fval', 'f8'), ('name', 'S15'), ('vec', 'f4', 2)]
+ n = 3
+ data = np.zeros(n, dtype=dt)
+ data['fval'] = np.random.random(n)
+ data['vec'] = np.random.random(n*2).reshape(n, 2)
+
+ data['name'] = ['mike', 'really_long_name_to_fill', 'jan']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ fits.write(data)
+
+ for onconstruct in [True, False]:
+ if onconstruct:
+ ctrim = True
+ otrim = False
+ else:
+ ctrim = False
+ otrim = True
+
+ with FITS(fname, 'rw', trim_strings=ctrim) as fits:
+
+ if ctrim:
+ dread = fits[1][:]
+ compare_rec(
+ data,
+ dread,
+ "trimmed strings constructor",
+ )
+
+ dname = fits[1]['name'][:]
+ compare_array(
+ data['name'],
+ dname,
+ "trimmed strings col read, constructor",
+ )
+ dread = fits[1][['name']][:]
+ compare_array(
+ data['name'],
+ dread['name'],
+ "trimmed strings col read, constructor",
+ )
+
+ dread = fits[1].read(trim_strings=otrim)
+ compare_rec(
+ data,
+ dread,
+ "trimmed strings keyword",
+ )
+ dname = fits[1].read(columns='name', trim_strings=otrim)
+ compare_array(
+ data['name'],
+ dname,
+ "trimmed strings col keyword",
+ )
+ dread = fits[1].read(columns=['name'], trim_strings=otrim)
+ compare_array(
+ data['name'],
+ dread['name'],
+ "trimmed strings col keyword",
+ )
+
+ # convenience function
+ dread = read(fname, trim_strings=True)
+ compare_rec(
+ data,
+ dread,
+ "trimmed strings convenience function",
+ )
+ dname = read(fname, columns='name', trim_strings=True)
+ compare_array(
+ data['name'],
+ dname,
+ "trimmed strings col convenience function",
+ )
+ dread = read(fname, columns=['name'], trim_strings=True)
+ compare_array(
+ data['name'],
+ dread['name'],
+ "trimmed strings col convenience function",
+ )
+
+
+def test_lower_upper():
+ """
+ test forcing names to upper and lower
+ """
+
+ rng = np.random.RandomState(8908)
+
+ dt = [('MyName', 'f8'), ('StuffThings', 'i4'), ('Blah', 'f4')]
+ data = np.zeros(3, dtype=dt)
+ data['MyName'] = rng.uniform(data.size)
+ data['StuffThings'] = rng.uniform(data.size)
+ data['Blah'] = rng.uniform(data.size)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ fits.write(data)
+
+ for i in [1, 2]:
+ if i == 1:
+ lower = True
+ upper = False
+ else:
+ lower = False
+ upper = True
+
+ with FITS(fname, 'rw', lower=lower, upper=upper) as fits:
+ for rows in [None, [1, 2]]:
+
+ d = fits[1].read(rows=rows)
+ compare_names(d.dtype.names, data.dtype.names,
+ lower=lower, upper=upper)
+
+ d = fits[1].read(
+ rows=rows, columns=['MyName', 'stuffthings']
+ )
+ compare_names(d.dtype.names, data.dtype.names[0:2],
+ lower=lower, upper=upper)
+
+ d = fits[1][1:2]
+ compare_names(d.dtype.names, data.dtype.names,
+ lower=lower, upper=upper)
+
+ if rows is not None:
+ d = fits[1][rows]
+ else:
+ d = fits[1][:]
+
+ compare_names(d.dtype.names, data.dtype.names,
+ lower=lower, upper=upper)
+
+ if rows is not None:
+ d = fits[1][['myname', 'stuffthings']][rows]
+ else:
+ d = fits[1][['myname', 'stuffthings']][:]
+
+ compare_names(d.dtype.names, data.dtype.names[0:2],
+ lower=lower, upper=upper)
+
+ # using overrides
+ with FITS(fname, 'rw') as fits:
+ for rows in [None, [1, 2]]:
+
+ d = fits[1].read(rows=rows, lower=lower, upper=upper)
+ compare_names(d.dtype.names, data.dtype.names,
+ lower=lower, upper=upper)
+
+ d = fits[1].read(
+ rows=rows, columns=['MyName', 'stuffthings'],
+ lower=lower, upper=upper
+ )
+ compare_names(d.dtype.names, data.dtype.names[0:2],
+ lower=lower, upper=upper)
+
+ for rows in [None, [1, 2]]:
+ d = read(fname, rows=rows, lower=lower, upper=upper)
+ compare_names(d.dtype.names, data.dtype.names,
+ lower=lower, upper=upper)
+
+ d = read(fname, rows=rows, columns=['MyName', 'stuffthings'],
+ lower=lower, upper=upper)
+ compare_names(d.dtype.names, data.dtype.names[0:2],
+ lower=lower, upper=upper)
+
+
+def test_read_raw():
+ """
+ testing reading the file as raw bytes
+ """
+ rng = np.random.RandomState(8908)
+
+ dt = [('MyName', 'f8'), ('StuffThings', 'i4'), ('Blah', 'f4')]
+ data = np.zeros(3, dtype=dt)
+ data['MyName'] = rng.uniform(data.size)
+ data['StuffThings'] = rng.uniform(data.size)
+ data['Blah'] = rng.uniform(data.size)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ try:
+ with FITS(fname, 'rw') as fits:
+ fits.write(data)
+ raw1 = fits.read_raw()
+
+ with FITS('mem://', 'rw') as fits:
+ fits.write(data)
+ raw2 = fits.read_raw()
+
+ with open(fname, 'rb') as fobj:
+ raw3 = fobj.read()
+
+ assert raw1 == raw2
+ assert raw1 == raw3
+ except Exception:
+ import traceback
+ traceback.print_exc()
+ assert False, 'Exception in testing read_raw'
+
+
+def test_table_bitcol_read_write():
+ """
+ Test basic write/read with bitcols
+ """
+
+ adata = make_data()
+ bdata = adata['bdata']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+ fits.write_table(bdata, extname='mytable', write_bitcols=True)
+
+ d = fits[1].read()
+ compare_rec(bdata, d, "table read/write")
+
+ rows = [0, 2]
+ d = fits[1].read(rows=rows)
+ compare_rec(bdata[rows], d, "table read/write rows")
+
+ d = fits[1][:2]
+ compare_rec(bdata[:2], d, "table read/write slice")
+
+ # now test read_column
+ with FITS(fname) as fits:
+
+ for f in bdata.dtype.names:
+ d = fits[1].read_column(f)
+ compare_array(
+ bdata[f], d, "table 1 single field read '%s'" % f
+ )
+
+ # now list of columns
+ for cols in [['b1vec', 'b1arr']]:
+ d = fits[1].read(columns=cols)
+ for f in d.dtype.names:
+ compare_array(bdata[f][:], d[f], "test column list %s" % f)
+
+ for rows in [[1, 3], [3, 1]]:
+ d = fits[1].read(columns=cols, rows=rows)
+ for f in d.dtype.names:
+ compare_array(
+ bdata[f][rows],
+ d[f],
+ "test column list %s row subset" % f
+ )
+
+
+def test_table_bitcol_append():
+ """
+ Test creating a table with bitcol support and appending new rows.
+ """
+ adata = make_data()
+ bdata = adata['bdata']
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ # initial write
+ fits.write_table(bdata, extname='mytable', write_bitcols=True)
+
+ with FITS(fname, 'rw') as fits:
+ # now append
+ bdata2 = bdata.copy()
+ fits[1].append(bdata2)
+
+ d = fits[1].read()
+ assert d.size == bdata.size*2
+
+ compare_rec(bdata, d[0:bdata.size], "Comparing initial write")
+ compare_rec(bdata2, d[bdata.size:], "Comparing appended data")
+
+
+def test_table_bitcol_insert():
+ """
+ Test inserting bit columns into an existing table.
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ with FITS(fname, 'rw') as fits:
+
+ # initial write
+ nrows = 3
+ d = np.zeros(nrows, dtype=[('ra', 'f8')])
+ d['ra'] = range(d.size)
+ fits.write(d)
+
+ with FITS(fname, 'rw') as fits:
+ bcol = np.array([True, False, True])
+
+ # insert a scalar bit column
+ fits[-1].insert_column(
+ 'bscalar_inserted', bcol, write_bitcols=True
+ )
+
+ d = fits[-1].read()
+ assert d.size == nrows, 'read size equals'
+ compare_array(bcol, d['bscalar_inserted'], "inserted bitcol")
+
+ bvec = np.array(
+ [[True, False],
+ [False, True],
+ [True, True]]
+ )
+
+ # insert a vector bit column
+ fits[-1].insert_column('bvec_inserted', bvec, write_bitcols=True)
+
+ d = fits[-1].read()
+ assert d.size == nrows, 'read size equals'
+ compare_array(bvec, d['bvec_inserted'], "inserted bitcol")
--- /dev/null
+import os
+import tempfile
+import warnings
+import numpy as np
+from ..fitslib import FITS
+from ..util import FITSRuntimeWarning
+
+
+def test_non_standard_key_value():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, 'test.fits')
+
+ im = np.zeros((3, 3))
+ with warnings.catch_warnings(record=True) as w:
+ with FITS(fname, 'rw') as fits:
+ fits.write(im)
+
+ # now write a key with a non-standard value
+ value = {'test': 3}
+ fits[-1].write_key('odd', value)
+
+ # DeprecationWarnings can also appear in the warnings list, so filter
+ # it down to just FITSRuntimeWarning instances.
+ # @at88mph 2019.10.09
+ filtered_warnings = list(
+ filter(lambda x: 'FITSRuntimeWarning' in '{}'.format(x.category), w) # noqa
+ )
+
+ assert len(filtered_warnings) == 1, (
+ 'Wrong length of output (Expected {} but got {}.)'.format(
+ 1, len(filtered_warnings),
+ )
+ )
+ assert issubclass(
+ filtered_warnings[-1].category, FITSRuntimeWarning,
+ )
--- /dev/null
+"""
+utilities for the fits library
+"""
+import sys
+import numpy
+
+from . import _fitsio_wrap
+
+IS_PY3 = sys.version_info >= (3, 0, 0)
+
+
+class FITSRuntimeWarning(RuntimeWarning):
+ pass
+
+
+def cfitsio_version(asfloat=False):
+ """
+ Return the cfitsio version as a string, or as a float if asfloat=True.
+ """
+ # use string version to avoid roundoffs
+ ver = '%0.3f' % _fitsio_wrap.cfitsio_version()
+ if asfloat:
+ return float(ver)
+ else:
+ return ver
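+
+
+# Example usage (the output shown is illustrative only; the real value
+# depends on the cfitsio that fitsio was built against):
+#
+# >>> cfitsio_version()
+# '3.490'
+# >>> cfitsio_version(asfloat=True)
+# 3.49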
+
+
+if sys.version_info > (3, 0, 0):
+ _itypes = (int,)
+ _stypes = (str, bytes)
+else:
+ _itypes = (int, long) # noqa - only for py2
+ _stypes = (basestring, unicode,) # noqa - only for py2
+
+_itypes += (numpy.uint8, numpy.int8,
+ numpy.uint16, numpy.int16,
+ numpy.uint32, numpy.int32,
+ numpy.uint64, numpy.int64)
+
+# numpy 2.x removed numpy.string_ in favor of numpy.bytes_
+if numpy.lib.NumpyVersion(numpy.__version__) < "1.28.0":
+ _stypes += (numpy.string_, numpy.str_,)
+else:
+ _stypes += (numpy.bytes_, numpy.str_,)
+
+# for header keywords
+_ftypes = (float, numpy.float32, numpy.float64)
+
+
+def isstring(arg):
+ return isinstance(arg, _stypes)
+
+
+def isinteger(arg):
+ return isinstance(arg, _itypes)
+
+
+def is_object(arr):
+ return arr.dtype.descr[0][1][1] == 'O'
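+
+
+# e.g. is_object(numpy.zeros(1, dtype='O')) is True, while
+# is_object(numpy.zeros(1, dtype='f8')) is False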
+
+
+def fields_are_object(arr):
+ isobj = numpy.zeros(len(arr.dtype.names), dtype=bool)
+ for i, name in enumerate(arr.dtype.names):
+ if is_object(arr[name]):
+ isobj[i] = True
+ return isobj
+
+
+def is_little_endian(array):
+ """
+ Return True if array is little endian, False otherwise.
+
+ Parameters
+ ----------
+ array: numpy array
+ A numerical python array.
+
+ Returns
+ -------
+ Truth value:
+ True for little-endian
+
+ Notes
+ -----
+ Strings are neither big nor little endian. The input must be a simple
+ numpy array, not an array with fields.
+ """
+ machine_little = numpy.little_endian
+
+ byteorder = array.dtype.base.byteorder
+ return (byteorder == '<') or (machine_little and byteorder == '=')
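+
+
+# For example, on any machine:
+# is_little_endian(numpy.zeros(1, dtype='<f8')) -> True
+# is_little_endian(numpy.zeros(1, dtype='>f8')) -> False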
+
+
+def array_to_native(array, inplace=False):
+ """
+ Convert an array to the native byte order.
+
+ NOTE: the inplace keyword argument is not currently used.
+ """
+ machine_little = numpy.little_endian
+
+ data_little = False
+ if array.dtype.names is None:
+
+ if array.dtype.base.byteorder == '|':
+ # strings and 1 byte integers
+ return array
+
+ data_little = is_little_endian(array)
+ else:
+ # assume all are same byte order: we only need to find one with
+ # little endian
+ for fname in array.dtype.names:
+ if is_little_endian(array[fname]):
+ data_little = True
+ break
+
+ if machine_little != data_little:
+ output = array.byteswap(inplace)
+ else:
+ output = array
+
+ return output
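+
+
+# A minimal example: on a little-endian machine, big-endian input comes
+# back byteswapped to native order:
+# arr = numpy.arange(3, dtype='>i4')
+# native = array_to_native(arr)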
+
+
+if numpy.lib.NumpyVersion(numpy.__version__) >= "2.0.0":
+ copy_if_needed = None
+elif numpy.lib.NumpyVersion(numpy.__version__) < "1.28.0":
+ copy_if_needed = False
+else:
+ # 2.0.0 dev versions, handle cases where copy may or may not exist
+ try:
+ numpy.array([1]).__array__(copy=None)
+ copy_if_needed = None
+ except TypeError:
+ copy_if_needed = False
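+
+# copy_if_needed is meant to be passed as the ``copy`` argument of
+# numpy.array, making a copy only when required under both numpy 1.x
+# (copy=False) and numpy 2.x (copy=None) semantics; array_to_native_c
+# below uses it this way.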
+
+
+def array_to_native_c(array_in, inplace=False):
+ # copy only made if not C order
+ arr = numpy.array(array_in, order='C', copy=copy_if_needed)
+ return array_to_native(arr, inplace=inplace)
+
+
+def mks(val):
+ """
+ make sure the value is a string, paying mind to python3 vs 2
+ """
+ if sys.version_info > (3, 0, 0):
+ if isinstance(val, bytes):
+ sval = str(val, 'utf-8')
+ else:
+ sval = str(val)
+ else:
+ sval = str(val)
+
+ return sval
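+
+
+# e.g. mks(b'hello') -> 'hello' and mks(3) -> '3'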
--- /dev/null
+--- /home/esheldon/Downloads/cfitsio-4.2.0/Makefile.in 2022-10-31 14:40:23.000000000 -0400
++++ /home/esheldon/Downloads/cfitsio-4.2.0-mod/Makefile.in 2023-07-14 11:50:25.811296978 -0400
+@@ -69,7 +69,9 @@
+ pliocomp.c fits_hcompress.c fits_hdecompress.c \
+ simplerng.c @GSIFTP_SRC@
+
+-ZLIB_SOURCES = zcompress.c zuncompress.c
++ZLIB_SOURCES = adler32.c crc32.c deflate.c infback.c \
++ inffast.c inflate.c inftrees.c trees.c \
++ uncompr.c zutil.c zcompress.c zuncompress.c
+
+ SOURCES = ${CORE_SOURCES} ${ZLIB_SOURCES} @F77_WRAPPERS@
+
--- /dev/null
+# Patches for cfitsio
+
+This directory contains patches for the cfitsio build. These patches
+are applied before the library is compiled during the python package
+build step.
+
+The patches were generated with the script `build_cfitsio_patches.py` by
+Matthew Becker in December of 2018.
+
+## Adding New Patches
+
+To add new patches, you need to
+
+1. Make a copy of the file you want to patch.
+2. Modify it.
+ 3. Call `diff -u old_file new_file` to get a unified format patch.
+4. Make sure the paths in the patch at the top look like this
+ ```
+ --- cfitsio<version>/<filename> 2018-03-01 10:28:51.000000000 -0600
+ +++ cfitsio<version>/<filename> 2018-12-14 08:39:20.000000000 -0600
+ ...
+ ```
+ where `<version>` and `<filename>` have the current cfitsio version and
+ file that is being patched.
+
+5. Commit the patch file in the patches directory with the name `<filename>.patch`.
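+
+For example, step 3 can be scripted the same way the helper script does it;
+here is a minimal sketch in Python (the version and file name below are
+hypothetical):
+
+```python
+import os
+
+# unmodified and modified copies of the same source file; naming both
+# directories cfitsio-<version> keeps the patch header paths close to
+# the required form
+src = 'cfitsio-4.2.0/putcols.c'
+dst = 'cfitsio-4.2.0-mod/putcols.c'
+
+# step 3: unified diff, written into the patches directory (step 5)
+os.system('diff -u %s %s > patches/putcols.c.patch' % (src, dst))
+
+# step 4: afterwards, edit the `+++` header line in the patch so both
+# paths read cfitsio-4.2.0/putcols.c
+```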
--- /dev/null
+import os
+import argparse
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--moddir', required=True,
+ help='directory containing modified files')
+ parser.add_argument('--dir', required=True,
+ help='directory containing unmodified files')
+ parser.add_argument('--patch-dir', required=True)
+ return parser.parse_args()
+
+
+def main():
+ args = get_args()
+
+ os.makedirs(args.patch_dir, exist_ok=True)
+
+ for root, _, files in os.walk(args.dir):
+ for fname in files:
+ src = os.path.join(args.dir, fname)
+ dst = os.path.join(args.moddir, fname)
+ patch = os.path.join(args.patch_dir, fname + '.patch')
+ os.system('diff -u %s %s > %s' % (src, dst, patch))
+ with open(patch, 'rb') as fp:
+ buff = fp.read()
+ if len(buff) == 0:
+ os.remove(patch)
+ else:
+ print(fname)
+ # only generate patches for the top-level directory
+ break
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+--- cfitsio-4.2.0/configure.in 2022-10-31 14:40:23.000000000 -0400
++++ cfitsio-4.2.0/configure.in 2023-07-14 11:45:00.797390794 -0400
+@@ -53,6 +53,12 @@
+ [ if test $enableval = yes; then SSE_FLAGS="$SSE_FLAGS -mssse3"; fi ]
+ )
+
++AC_ARG_ENABLE(
++ standard_strings,
++ [AS_HELP_STRING([--enable-standard-strings],[Enable use of FITSIO standard string processing])],
++ [ if test $enableval = yes; then USE_STANDARD_STRINGS=yes; fi ]
++)
++
+ SYMBOLS=""
+ AC_ARG_ENABLE(
+ symbols,
+@@ -71,6 +77,13 @@
+ AC_DEFINE(BUILD_HERA)
+ fi
+
++# ------------------------------------------------------------------------------
++# Define FITS_USE_STANDARD_STRINGS
++# ------------------------------------------------------------------------------
++if test "x$USE_STANDARD_STRINGS" = xyes; then
++ AC_DEFINE(FITS_USE_STANDARD_STRINGS)
++fi
++
+ # Optional support for bzip2 compression:
+ AC_ARG_WITH(
+ bzip2,
--- /dev/null
+--- cfitsio-4.2.0/configure 2022-10-31 14:40:23.000000000 -0400
++++ cfitsio-4.2.0/configure 2023-07-14 11:46:53.298055665 -0400
+@@ -744,6 +744,7 @@
+ enable_reentrant
+ enable_sse2
+ enable_ssse3
++enable_standard_strings
+ enable_symbols
+ enable_hera
+ with_bzip2
+@@ -1386,6 +1387,8 @@
+ --enable-sse2 Enable use of instructions in the SSE2 extended
+ instruction set
+ --enable-ssse3 Enable use of instructions in the SSSE3 extended
++ --enable-standard-strings
++ Enable use of FITSIO standard string processing
+ instruction set
+ --enable-symbols Enable debugging symbols by turning optimization off
+ --enable-hera Build for HERA (ASD use only)
+@@ -2622,6 +2625,11 @@
+
+ fi
+
++# Check whether --enable-standard_strings was given.
++if test "${enable_standard_strings+set}" = set; then :
++ enableval=$enable_standard_strings; if test $enableval = yes; then USE_STANDARD_STRINGS=yes; fi
++
++fi
+
+ SYMBOLS=""
+ # Check whether --enable-symbols was given.
+@@ -2646,6 +2654,14 @@
+
+ fi
+
++# ------------------------------------------------------------------------------
++# Define FITS_USE_STANDARD_STRINGS
++# ------------------------------------------------------------------------------
++if test "x$USE_STANDARD_STRINGS" = xyes; then
++ $as_echo "#define FITS_USE_STANDARD_STRINGS 1" >>confdefs.h
++
++fi
++
+ # Optional support for bzip2 compression:
+
+
--- /dev/null
+--- cfitsio-4.2.0/fitscore.c 2022-10-31 14:40:23.000000000 -0400
++++ cfitsio-4.2.0/fitscore.c 2023-07-14 11:47:54.870417064 -0400
+@@ -194,6 +194,21 @@
+
+ return(*version);
+ }
++
++
++/*
++ Return 1 if we are to treat strings per the FITS standard (not
++ replacing nulls with spaces, and not padding with spaces)
++*/
++int fits_use_standard_strings(void) {
++#ifdef FITS_USE_STANDARD_STRINGS
++ return 1;
++#else
++ return 0;
++#endif
++}
++
++
+ /*--------------------------------------------------------------------------*/
+ int ffflnm(fitsfile *fptr, /* I - FITS file pointer */
+ char *filename, /* O - name of the file */
--- /dev/null
+--- cfitsio-4.2.0/fitsio.h 2022-10-31 14:40:23.000000000 -0400
++++ cfitsio-4.2.0/fitsio.h 2023-07-14 11:48:10.102506229 -0400
+@@ -811,6 +810,10 @@
+ /*---------------- utility routines -------------*/
+
+ float CFITS_API ffvers(float *version);
++#ifndef _FITSIO_H_FITS_USE_STANDARD_STRINGS
++#define _FITSIO_H_FITS_USE_STANDARD_STRINGS
++int CFITS_API fits_use_standard_strings(void);
++#endif
+ void CFITS_API ffupch(char *string);
+ void CFITS_API ffgerr(int status, char *errtext);
+ void CFITS_API ffpmsg(const char *err_message);
--- /dev/null
+--- cfitsio-4.2.0/fitsio2.h 2023-08-09 10:23:45.508392645 +0800
++++ cfitsio-4.2.0/fitsio2.h 2023-08-09 10:29:44.960511085 +0800
+@@ -151,6 +151,18 @@
+ # error "can't handle long size given by __riscv_xlen"
+ # endif
+
++#elif defined(__loongarch__)
++
++#define BYTESWAPPED TRUE
++
++# if __loongarch_grlen == 32
++# define LONGSIZE 32
++# elif __loongarch_grlen == 64
++# define LONGSIZE 64
++# else
++# error "can't handle long size given by __loongarch_grlen"
++# endif
++
+ /* ============================================================== */
+ /* the following are all 32-bit byteswapped platforms */
+
--- /dev/null
+--- cfitsio-4.2.0/putcols.c 2022-10-31 14:40:24.000000000 -0400
++++ cfitsio-4.2.0/putcols.c 2023-07-14 11:49:03.954820768 -0400
+@@ -157,7 +157,16 @@
+
+ for (;jj < twidth; jj++) /* fill field with blanks, if needed */
+ {
+- *buffer = ' ';
++ if ( fits_use_standard_strings() ) {
++ if (snull[0] == ASCII_NULL_UNDEFINED) {
++ *buffer = ' ';
++ } else {
++ *buffer = '\0';
++ }
++ } else {
++ *buffer = ' ';
++ }
++
+ buffer++;
+ }
+
--- /dev/null
+[egg_info]
+tag_build =
+tag_date = 0
+
--- /dev/null
+#
+# setup script for fitsio, using setuptools
+#
+# c.f.
+# https://packaging.python.org/guides/distributing-packages-using-setuptools/
+
+from __future__ import print_function
+from setuptools import setup, Extension, find_packages
+from setuptools.command.build_ext import build_ext
+
+import sys
+import os
+import subprocess
+from subprocess import Popen, PIPE
+import glob
+import shutil
+
+
+if "--use-system-fitsio" in sys.argv:
+ del sys.argv[sys.argv.index("--use-system-fitsio")]
+ USE_SYSTEM_FITSIO = True
+else:
+ USE_SYSTEM_FITSIO = "FITSIO_USE_SYSTEM_FITSIO" in os.environ
+
+if (
+ "--system-fitsio-includedir" in sys.argv
+ or any(a.startswith("--system-fitsio-includedir=") for a in sys.argv)
+):
+ if "--system-fitsio-includedir" in sys.argv:
+ ind = sys.argv.index("--system-fitsio-includedir")
+ SYSTEM_FITSIO_INCLUDEDIR = sys.argv[ind+1]
+ del sys.argv[ind+1]
+ del sys.argv[ind]
+ else:
+ for ind in range(len(sys.argv)):
+ if sys.argv[ind].startswith("--system-fitsio-includedir="):
+ break
+ SYSTEM_FITSIO_INCLUDEDIR = sys.argv[ind].split("=", 1)[1]
+ del sys.argv[ind]
+else:
+ SYSTEM_FITSIO_INCLUDEDIR = os.environ.get(
+ "FITSIO_SYSTEM_FITSIO_INCLUDEDIR",
+ None,
+ )
+
+
+if (
+ "--system-fitsio-libdir" in sys.argv
+ or any(a.startswith("--system-fitsio-libdir=") for a in sys.argv)
+):
+ if "--system-fitsio-libdir" in sys.argv:
+ ind = sys.argv.index("--system-fitsio-libdir")
+ SYSTEM_FITSIO_LIBDIR = sys.argv[ind+1]
+ del sys.argv[ind+1]
+ del sys.argv[ind]
+ else:
+ for ind in range(len(sys.argv)):
+ if sys.argv[ind].startswith("--system-fitsio-libdir="):
+ break
+ SYSTEM_FITSIO_LIBDIR = sys.argv[ind].split("=", 1)[1]
+ del sys.argv[ind]
+else:
+ SYSTEM_FITSIO_LIBDIR = os.environ.get(
+ "FITSIO_SYSTEM_FITSIO_LIBDIR",
+ None,
+ )
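+
+# Example invocation using the flags parsed above (paths are illustrative;
+# the flags are consumed here before setuptools parses sys.argv):
+#
+# python setup.py build_ext --use-system-fitsio \
+# --system-fitsio-includedir=/usr/local/include \
+# --system-fitsio-libdir=/usr/local/lib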
+
+
+class build_ext_subclass(build_ext):
+ cfitsio_version = '4.4.1-20240617'
+ cfitsio_dir = 'cfitsio-%s' % cfitsio_version
+
+ def finalize_options(self):
+
+ build_ext.finalize_options(self)
+
+ self.cfitsio_build_dir = os.path.join(
+ self.build_temp, self.cfitsio_dir)
+ self.cfitsio_zlib_dir = os.path.join(
+ self.cfitsio_build_dir, 'zlib')
+ self.cfitsio_patch_dir = os.path.join(
+ self.build_temp, 'patches')
+
+ if USE_SYSTEM_FITSIO:
+ if SYSTEM_FITSIO_INCLUDEDIR is not None:
+ self.include_dirs.insert(0, SYSTEM_FITSIO_INCLUDEDIR)
+ if SYSTEM_FITSIO_LIBDIR is not None:
+ self.library_dirs.insert(0, SYSTEM_FITSIO_LIBDIR)
+ else:
+ # We defer configuration of the bundled cfitsio to build_extensions
+ # because we will know the compiler there.
+ self.include_dirs.insert(0, self.cfitsio_build_dir)
+
+ def run(self):
+ # For extensions that require 'numpy' in their include dirs,
+ # replace 'numpy' with the actual paths
+ import numpy
+ np_include = numpy.get_include()
+
+ for extension in self.extensions:
+ if 'numpy' in extension.include_dirs:
+ idx = extension.include_dirs.index('numpy')
+ extension.include_dirs.insert(idx, np_include)
+ extension.include_dirs.remove('numpy')
+
+ build_ext.run(self)
+
+ def build_extensions(self):
+ if not USE_SYSTEM_FITSIO:
+
+            # Use the compiler that built Python itself to build
+            # cfitsio, for maximum compatibility.
+
+            # The include dirs also need to be set here, directly on
+            # the compiler.
+ self.compiler.include_dirs.insert(0, self.cfitsio_build_dir)
+
+ CCold = self.compiler.compiler
+ if 'ccache' in CCold:
+ CC = []
+ for val in CCold:
+ if val == 'ccache':
+ print("removing ccache from the compiler options")
+ continue
+
+ CC.append(val)
+ else:
+ CC = None
+
+ self.configure_cfitsio(
+ CC=CC,
+ ARCHIVE=self.compiler.archiver,
+ RANLIB=self.compiler.ranlib,
+ )
+
+ # If configure detected bzlib.h, we have to link to libbz2
+ with open(os.path.join(self.cfitsio_build_dir, 'Makefile')) as fp:
+ _makefile = fp.read()
+ if '-DHAVE_BZIP2=1' in _makefile:
+ self.compiler.add_library('bz2')
+ if '-DCFITSIO_HAVE_CURL=1' in _makefile:
+ self.compiler.add_library('curl')
+
+ self.compile_cfitsio()
+
+            # Link against the object files built for cfitsio's static
+            # (.a) library; they should be relocatable (-fPIC), since
+            # we used the Python compiler flags.
+
+ link_objects = glob.glob(
+ os.path.join(self.cfitsio_build_dir, '*.o'))
+
+ self.compiler.set_link_objects(link_objects)
+
+            # Ultimate hack: append the .o files to the dependency list
+            # so they will be properly rebuilt if the cfitsio source is
+            # updated.
+ for ext in self.extensions:
+ ext.depends += link_objects
+ else:
+ self.compiler.add_library('cfitsio')
+
+ # Check if system cfitsio was compiled with bzip2 and/or curl
+ if self.check_system_cfitsio_objects('bzip2'):
+ self.compiler.add_library('bz2')
+ if self.check_system_cfitsio_objects('curl_'):
+ self.compiler.add_library('curl')
+
+ # Make sure the external lib has the fits_use_standard_strings
+ # function. If not, then define a macro to tell the wrapper
+ # to always return False.
+ if not self.check_system_cfitsio_objects(
+ '_fits_use_standard_strings'):
+ self.compiler.define_macro(
+ 'FITSIO_PYWRAP_ALWAYS_NONSTANDARD_STRINGS')
+
+ self.compiler.add_library('z')
+
+ # fitsio requires libm as well.
+ self.compiler.add_library('m')
+
+ # call the original build_extensions
+
+ build_ext.build_extensions(self)
+
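+    # Apply every *.patch under the copied patch dir to the cfitsio
+    # build dir. A `patch -N --dry-run` is attempted first; if it
+    # fails, the patch is taken to be already applied and is skipped,
+    # otherwise the patch is applied for real.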
+ def patch_cfitsio(self):
+ patches = glob.glob('%s/*.patch' % self.cfitsio_patch_dir)
+ for patch in patches:
+ fname = os.path.basename(patch.replace('.patch', ''))
+ try:
+ subprocess.check_call(
+ 'patch -N --dry-run %s/%s %s' % (
+ self.cfitsio_build_dir, fname, patch),
+ shell=True)
+ except subprocess.CalledProcessError:
+ pass
+ else:
+ subprocess.check_call(
+ 'patch %s/%s %s' % (
+ self.cfitsio_build_dir, fname, patch),
+ shell=True)
+
+ def configure_cfitsio(self, CC=None, ARCHIVE=None, RANLIB=None):
+
+ # prepare source code and run configure
+ def copy_update(dir1, dir2):
+ f1 = os.listdir(dir1)
+ for f in f1:
+ path1 = os.path.join(dir1, f)
+ path2 = os.path.join(dir2, f)
+
+ if os.path.isdir(path1):
+ if not os.path.exists(path2):
+ os.makedirs(path2)
+ copy_update(path1, path2)
+ else:
+ if not os.path.exists(path2):
+ shutil.copy(path1, path2)
+ else:
+ stat1 = os.stat(path1)
+ stat2 = os.stat(path2)
+ if (stat1.st_mtime > stat2.st_mtime):
+ shutil.copy(path1, path2)
+
+ if not os.path.exists('build'):
+ os.makedirs('build')
+
+ if not os.path.exists(self.cfitsio_build_dir):
+ os.makedirs(self.cfitsio_build_dir)
+
+ if not os.path.exists(self.cfitsio_patch_dir):
+ os.makedirs(self.cfitsio_patch_dir)
+
+ copy_update(self.cfitsio_dir, self.cfitsio_build_dir)
+ copy_update('zlib', self.cfitsio_build_dir)
+ copy_update('patches', self.cfitsio_patch_dir)
+
+        # we patch the source in the build dir to avoid mucking with the repo
+ self.patch_cfitsio()
+
+ makefile = os.path.join(self.cfitsio_build_dir, 'Makefile')
+
+ if os.path.exists(makefile):
+ # Makefile already there
+ print("found Makefile so not running configure!", flush=True)
+ return
+
+ args = ''
+
+ if "FITSIO_BZIP2_DIR" in os.environ:
+ args += ' --with-bzip2="%s"' % os.environ["FITSIO_BZIP2_DIR"]
+ else:
+ args += ' --with-bzip2'
+
+ if CC is not None:
+ args += ' CC="%s"' % ' '.join(CC[:1])
+ args += ' CFLAGS="%s -fvisibility=hidden"' % ' '.join(CC[1:])
+ else:
+ args += ' CFLAGS="${CFLAGS} -fvisibility=hidden"'
+
+ if ARCHIVE:
+ args += ' ARCHIVE="%s"' % ' '.join(ARCHIVE)
+ if RANLIB:
+ args += ' RANLIB="%s"' % ' '.join(RANLIB)
+
+ p = Popen(
+ "sh ./configure --enable-standard-strings " + args,
+ shell=True,
+ cwd=self.cfitsio_build_dir,
+ )
+ p.wait()
+ if p.returncode != 0:
+ raise ValueError(
+ "could not configure cfitsio %s" % self.cfitsio_version)
+
+ def compile_cfitsio(self):
+ p = Popen(
+ "make",
+ shell=True,
+ cwd=self.cfitsio_build_dir,
+ )
+ p.wait()
+ if p.returncode != 0:
+ raise ValueError(
+ "could not compile cfitsio %s" % self.cfitsio_version)
+
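+    # Scan the symbol table of a system libcfitsio.a with `nm -g` and
+    # grep for obj_name. Used above to detect optional features
+    # (bzip2, curl, standard strings) compiled into the system
+    # library.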
+ def check_system_cfitsio_objects(self, obj_name):
+ for lib_dir in self.library_dirs:
+ if os.path.isfile('%s/libcfitsio.a' % (lib_dir)):
+ p = Popen(
+ "nm -g %s/libcfitsio.a | grep %s" % (lib_dir, obj_name),
+ shell=True,
+ stdout=PIPE,
+ stderr=PIPE,
+ )
+ if len(p.stdout.read()) > 0:
+ return True
+ else:
+ return False
+ return False
+
+
+sources = ["fitsio/fitsio_pywrap.c"]
+
+ext = Extension("fitsio._fitsio_wrap", sources, include_dirs=['numpy'])
+
+description = ("A full-featured python library to read from and "
+               "write to FITS files.")
+
+with open(os.path.join(os.path.dirname(__file__), "README.md")) as fp:
+ long_description = fp.read()
+
+classifiers = [
+ "Development Status :: 5 - Production/Stable",
+ "License :: OSI Approved :: GNU General Public License (GPL)",
+ "Topic :: Scientific/Engineering :: Astronomy",
+ "Intended Audience :: Science/Research",
+]
+
+setup(
+ name="fitsio",
+ version="1.2.5",
+ description=description,
+ long_description=long_description,
+ long_description_content_type='text/markdown; charset=UTF-8; variant=GFM',
+ license="GPL",
+ classifiers=classifiers,
+ url="https://github.com/esheldon/fitsio",
+ author="Erin Scott Sheldon",
+ author_email="erin.sheldon@gmail.com",
+ setup_requires=['numpy'],
+ install_requires=['numpy'],
+ packages=find_packages(),
+ include_package_data=True,
+ ext_modules=[ext],
+ cmdclass={"build_ext": build_ext_subclass}
+)
--- /dev/null
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+ version 1.2.13, October 13th, 2022
+
+ Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
+
+*/
--- /dev/null
+/* adler32.c -- compute the Adler-32 checksum of a data stream
+ * Copyright (C) 1995-2007 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zutil.h"
+
+#define local static
+
+local uLong adler32_combine_(uLong adler1, uLong adler2, z_off64_t len2);
+
+#define BASE 65521UL /* largest prime smaller than 65536 */
+#define NMAX 5552
+/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
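+/* Check: n = 5552 gives 3930857640 + 363832560 = 4294690200 <= 2^32-1,
+   while n = 5553 gives 4296171735 > 2^32-1, so 5552 is indeed the
+   largest such n. */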
+
+#define DO1(buf,i) {adler += (buf)[i]; sum2 += adler;}
+#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
+#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
+#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
+#define DO16(buf) DO8(buf,0); DO8(buf,8);
+
+/* use NO_DIVIDE if your processor does not do division in hardware */
+#ifdef NO_DIVIDE
+# define MOD(a) \
+ do { \
+ if (a >= (BASE << 16)) a -= (BASE << 16); \
+ if (a >= (BASE << 15)) a -= (BASE << 15); \
+ if (a >= (BASE << 14)) a -= (BASE << 14); \
+ if (a >= (BASE << 13)) a -= (BASE << 13); \
+ if (a >= (BASE << 12)) a -= (BASE << 12); \
+ if (a >= (BASE << 11)) a -= (BASE << 11); \
+ if (a >= (BASE << 10)) a -= (BASE << 10); \
+ if (a >= (BASE << 9)) a -= (BASE << 9); \
+ if (a >= (BASE << 8)) a -= (BASE << 8); \
+ if (a >= (BASE << 7)) a -= (BASE << 7); \
+ if (a >= (BASE << 6)) a -= (BASE << 6); \
+ if (a >= (BASE << 5)) a -= (BASE << 5); \
+ if (a >= (BASE << 4)) a -= (BASE << 4); \
+ if (a >= (BASE << 3)) a -= (BASE << 3); \
+ if (a >= (BASE << 2)) a -= (BASE << 2); \
+ if (a >= (BASE << 1)) a -= (BASE << 1); \
+ if (a >= BASE) a -= BASE; \
+ } while (0)
+# define MOD4(a) \
+ do { \
+ if (a >= (BASE << 4)) a -= (BASE << 4); \
+ if (a >= (BASE << 3)) a -= (BASE << 3); \
+ if (a >= (BASE << 2)) a -= (BASE << 2); \
+ if (a >= (BASE << 1)) a -= (BASE << 1); \
+ if (a >= BASE) a -= BASE; \
+ } while (0)
+#else
+# define MOD(a) a %= BASE
+# define MOD4(a) a %= BASE
+#endif
+
+/* ========================================================================= */
+uLong ZEXPORT adler32(adler, buf, len)
+ uLong adler;
+ const Bytef *buf;
+ uInt len;
+{
+ unsigned long sum2;
+ unsigned n;
+
+ /* split Adler-32 into component sums */
+ sum2 = (adler >> 16) & 0xffff;
+ adler &= 0xffff;
+
+ /* in case user likes doing a byte at a time, keep it fast */
+ if (len == 1) {
+ adler += buf[0];
+ if (adler >= BASE)
+ adler -= BASE;
+ sum2 += adler;
+ if (sum2 >= BASE)
+ sum2 -= BASE;
+ return adler | (sum2 << 16);
+ }
+
+ /* initial Adler-32 value (deferred check for len == 1 speed) */
+ if (buf == Z_NULL)
+ return 1L;
+
+ /* in case short lengths are provided, keep it somewhat fast */
+ if (len < 16) {
+ while (len--) {
+ adler += *buf++;
+ sum2 += adler;
+ }
+ if (adler >= BASE)
+ adler -= BASE;
+ MOD4(sum2); /* only added so many BASE's */
+ return adler | (sum2 << 16);
+ }
+
+ /* do length NMAX blocks -- requires just one modulo operation */
+ while (len >= NMAX) {
+ len -= NMAX;
+ n = NMAX / 16; /* NMAX is divisible by 16 */
+ do {
+ DO16(buf); /* 16 sums unrolled */
+ buf += 16;
+ } while (--n);
+ MOD(adler);
+ MOD(sum2);
+ }
+
+ /* do remaining bytes (less than NMAX, still just one modulo) */
+ if (len) { /* avoid modulos if none remaining */
+ while (len >= 16) {
+ len -= 16;
+ DO16(buf);
+ buf += 16;
+ }
+ while (len--) {
+ adler += *buf++;
+ sum2 += adler;
+ }
+ MOD(adler);
+ MOD(sum2);
+ }
+
+ /* return recombined sums */
+ return adler | (sum2 << 16);
+}
+
+/* ========================================================================= */
+local uLong adler32_combine_(adler1, adler2, len2)
+ uLong adler1;
+ uLong adler2;
+ z_off64_t len2;
+{
+ unsigned long sum1;
+ unsigned long sum2;
+ unsigned rem;
+
+ /* the derivation of this formula is left as an exercise for the reader */
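+    /* A sketch of it: write adler = (s2 << 16) | s1. When the second
+       stream's bytes are processed after the first, its running s1 is
+       larger by (s1_1 - 1) at every step, since it starts from s1_1
+       rather than 1, so each of the len2 steps adds an extra
+       (s1_1 - 1) to s2. Working mod BASE:
+           s1 = s1_1 + s1_2 - 1
+           s2 = s2_1 + s2_2 + len2*(s1_1 - 1)
+       The code below computes exactly this, adding BASE where needed
+       to keep intermediate values nonnegative. */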
+ rem = (unsigned)(len2 % BASE);
+ sum1 = adler1 & 0xffff;
+ sum2 = rem * sum1;
+ MOD(sum2);
+ sum1 += (adler2 & 0xffff) + BASE - 1;
+ sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem;
+ if (sum1 >= BASE) sum1 -= BASE;
+ if (sum1 >= BASE) sum1 -= BASE;
+ if (sum2 >= (BASE << 1)) sum2 -= (BASE << 1);
+ if (sum2 >= BASE) sum2 -= BASE;
+ return sum1 | (sum2 << 16);
+}
+
+/* ========================================================================= */
+uLong ZEXPORT adler32_combine(adler1, adler2, len2)
+ uLong adler1;
+ uLong adler2;
+ z_off_t len2;
+{
+ return adler32_combine_(adler1, adler2, len2);
+}
+
+uLong ZEXPORT adler32_combine64(adler1, adler2, len2)
+ uLong adler1;
+ uLong adler2;
+ z_off64_t len2;
+{
+ return adler32_combine_(adler1, adler2, len2);
+}
--- /dev/null
+/* crc32.c -- compute the CRC-32 of a data stream
+ * Copyright (C) 1995-2006, 2010 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ *
+ * Thanks to Rodney Brown <rbrown64@csc.com.au> for his contribution of faster
+ * CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing
+ * tables for updating the shift register in one step with three exclusive-ors
+ * instead of four steps with four exclusive-ors. This results in about a
+ * factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3.
+ */
+
+/*
+ Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore
+ protection on the static variables used to control the first-use generation
+ of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should
+ first call get_crc_table() to initialize the tables before allowing more than
+ one thread to use crc32().
+ */
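+
+/*
+   A minimal usage sketch (assuming DYNAMIC_CRC_TABLE is defined and
+   some threading API, POSIX threads here, is available):
+
+       get_crc_table();                   build tables single-threaded
+       pthread_create(&tid, NULL, worker, NULL);
+                                          workers may now call crc32()
+ */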
+
+#ifdef MAKECRCH
+# include <stdio.h>
+# ifndef DYNAMIC_CRC_TABLE
+# define DYNAMIC_CRC_TABLE
+# endif /* !DYNAMIC_CRC_TABLE */
+#endif /* MAKECRCH */
+
+#include "zutil.h" /* for STDC and FAR definitions */
+
+#define local static
+
+/* Find a four-byte integer type for crc32_little() and crc32_big(). */
+#ifndef NOBYFOUR
+# ifdef STDC /* need ANSI C limits.h to determine sizes */
+# include <limits.h>
+# define BYFOUR
+# if (UINT_MAX == 0xffffffffUL)
+ typedef unsigned int u4;
+# else
+# if (ULONG_MAX == 0xffffffffUL)
+ typedef unsigned long u4;
+# else
+# if (USHRT_MAX == 0xffffffffUL)
+ typedef unsigned short u4;
+# else
+# undef BYFOUR /* can't find a four-byte integer type! */
+# endif
+# endif
+# endif
+# endif /* STDC */
+#endif /* !NOBYFOUR */
+
+/* Definitions for doing the crc four data bytes at a time. */
+#ifdef BYFOUR
+# define REV(w) ((((w)>>24)&0xff)+(((w)>>8)&0xff00)+ \
+ (((w)&0xff00)<<8)+(((w)&0xff)<<24))
+ local unsigned long crc32_little OF((unsigned long,
+ const unsigned char FAR *, unsigned));
+ local unsigned long crc32_big OF((unsigned long,
+ const unsigned char FAR *, unsigned));
+# define TBLS 8
+#else
+# define TBLS 1
+#endif /* BYFOUR */
+
+/* Local functions for crc concatenation */
+local unsigned long gf2_matrix_times OF((unsigned long *mat,
+ unsigned long vec));
+local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat));
+local uLong crc32_combine_(uLong crc1, uLong crc2, z_off64_t len2);
+
+
+#ifdef DYNAMIC_CRC_TABLE
+
+local volatile int crc_table_empty = 1;
+local unsigned long FAR crc_table[TBLS][256];
+local void make_crc_table OF((void));
+#ifdef MAKECRCH
+ local void write_table OF((FILE *, const unsigned long FAR *));
+#endif /* MAKECRCH */
+/*
+ Generate tables for a byte-wise 32-bit CRC calculation on the polynomial:
+ x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1.
+
+ Polynomials over GF(2) are represented in binary, one bit per coefficient,
+ with the lowest powers in the most significant bit. Then adding polynomials
+ is just exclusive-or, and multiplying a polynomial by x is a right shift by
+ one. If we call the above polynomial p, and represent a byte as the
+ polynomial q, also with the lowest power in the most significant bit (so the
+ byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p,
+ where a mod b means the remainder after dividing a by b.
+
+ This calculation is done using the shift-register method of multiplying and
+ taking the remainder. The register is initialized to zero, and for each
+ incoming bit, x^32 is added mod p to the register if the bit is a one (where
+ x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by
+ x (which is shifting right by one and adding x^32 mod p if the bit shifted
+ out is a one). We start with the highest power (least significant bit) of
+ q and repeat for all eight bits of q.
+
+ The first table is simply the CRC of all possible eight bit values. This is
+ all the information needed to generate CRCs on data a byte at a time for all
+ combinations of CRC register values and incoming bytes. The remaining tables
+ allow for word-at-a-time CRC calculation for both big-endian and little-
+ endian machines, where a word is four bytes.
+*/
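+/*
+  Concretely, the byte-at-a-time update performed with the first table
+  (see DO1 below) is
+
+      crc = crc_table[0][(crc ^ byte) & 0xff] ^ (crc >> 8);
+
+  the low byte of the register selects a precomputed remainder and the
+  rest of the register shifts down by eight bits.
+*/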
+local void make_crc_table()
+{
+ unsigned long c;
+ int n, k;
+ unsigned long poly; /* polynomial exclusive-or pattern */
+ /* terms of polynomial defining this crc (except x^32): */
+ static volatile int first = 1; /* flag to limit concurrent making */
+ static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
+
+ /* See if another task is already doing this (not thread-safe, but better
+ than nothing -- significantly reduces duration of vulnerability in
+ case the advice about DYNAMIC_CRC_TABLE is ignored) */
+ if (first) {
+ first = 0;
+
+ /* make exclusive-or pattern from polynomial (0xedb88320UL) */
+ poly = 0UL;
+ for (n = 0; n < sizeof(p)/sizeof(unsigned char); n++)
+ poly |= 1UL << (31 - p[n]);
+
+ /* generate a crc for every 8-bit value */
+ for (n = 0; n < 256; n++) {
+ c = (unsigned long)n;
+ for (k = 0; k < 8; k++)
+ c = c & 1 ? poly ^ (c >> 1) : c >> 1;
+ crc_table[0][n] = c;
+ }
+
+#ifdef BYFOUR
+ /* generate crc for each value followed by one, two, and three zeros,
+ and then the byte reversal of those as well as the first table */
+ for (n = 0; n < 256; n++) {
+ c = crc_table[0][n];
+ crc_table[4][n] = REV(c);
+ for (k = 1; k < 4; k++) {
+ c = crc_table[0][c & 0xff] ^ (c >> 8);
+ crc_table[k][n] = c;
+ crc_table[k + 4][n] = REV(c);
+ }
+ }
+#endif /* BYFOUR */
+
+ crc_table_empty = 0;
+ }
+ else { /* not first */
+ /* wait for the other guy to finish (not efficient, but rare) */
+ while (crc_table_empty)
+ ;
+ }
+
+#ifdef MAKECRCH
+ /* write out CRC tables to crc32.h */
+ {
+ FILE *out;
+
+ out = fopen("crc32.h", "w");
+ if (out == NULL) return;
+ fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n");
+ fprintf(out, " * Generated automatically by crc32.c\n */\n\n");
+ fprintf(out, "local const unsigned long FAR ");
+ fprintf(out, "crc_table[TBLS][256] =\n{\n {\n");
+ write_table(out, crc_table[0]);
+# ifdef BYFOUR
+ fprintf(out, "#ifdef BYFOUR\n");
+ for (k = 1; k < 8; k++) {
+ fprintf(out, " },\n {\n");
+ write_table(out, crc_table[k]);
+ }
+ fprintf(out, "#endif\n");
+# endif /* BYFOUR */
+ fprintf(out, " }\n};\n");
+ fclose(out);
+ }
+#endif /* MAKECRCH */
+}
+
+#ifdef MAKECRCH
+local void write_table(out, table)
+ FILE *out;
+ const unsigned long FAR *table;
+{
+ int n;
+
+ for (n = 0; n < 256; n++)
+ fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", table[n],
+ n == 255 ? "\n" : (n % 5 == 4 ? ",\n" : ", "));
+}
+#endif /* MAKECRCH */
+
+#else /* !DYNAMIC_CRC_TABLE */
+/* ========================================================================
+ * Tables of CRC-32s of all single-byte values, made by make_crc_table().
+ */
+#include "crc32.h"
+#endif /* DYNAMIC_CRC_TABLE */
+
+/* =========================================================================
+ * This function can be used by asm versions of crc32()
+ */
+const unsigned long FAR * ZEXPORT get_crc_table()
+{
+#ifdef DYNAMIC_CRC_TABLE
+ if (crc_table_empty)
+ make_crc_table();
+#endif /* DYNAMIC_CRC_TABLE */
+ return (const unsigned long FAR *)crc_table;
+}
+
+/* ========================================================================= */
+#define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8)
+#define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1
+
+/* ========================================================================= */
+unsigned long ZEXPORT crc32(crc, buf, len)
+ unsigned long crc;
+ const unsigned char FAR *buf;
+ uInt len;
+{
+ if (buf == Z_NULL) return 0UL;
+
+#ifdef DYNAMIC_CRC_TABLE
+ if (crc_table_empty)
+ make_crc_table();
+#endif /* DYNAMIC_CRC_TABLE */
+
+#ifdef BYFOUR
+ if (sizeof(void *) == sizeof(ptrdiff_t)) {
+ u4 endian;
+
+ endian = 1;
+ if (*((unsigned char *)(&endian)))
+ return crc32_little(crc, buf, len);
+ else
+ return crc32_big(crc, buf, len);
+ }
+#endif /* BYFOUR */
+ crc = crc ^ 0xffffffffUL;
+ while (len >= 8) {
+ DO8;
+ len -= 8;
+ }
+ if (len) do {
+ DO1;
+ } while (--len);
+ return crc ^ 0xffffffffUL;
+}
+
+#ifdef BYFOUR
+
+/* ========================================================================= */
+#define DOLIT4 c ^= *buf4++; \
+ c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \
+ crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24]
+#define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4
+
+/* ========================================================================= */
+local unsigned long crc32_little(crc, buf, len)
+ unsigned long crc;
+ const unsigned char FAR *buf;
+ unsigned len;
+{
+ register u4 c;
+ register const u4 FAR *buf4;
+
+ c = (u4)crc;
+ c = ~c;
+ while (len && ((ptrdiff_t)buf & 3)) {
+ c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
+ len--;
+ }
+
+ buf4 = (const u4 FAR *)(const void FAR *)buf;
+ while (len >= 32) {
+ DOLIT32;
+ len -= 32;
+ }
+ while (len >= 4) {
+ DOLIT4;
+ len -= 4;
+ }
+ buf = (const unsigned char FAR *)buf4;
+
+ if (len) do {
+ c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
+ } while (--len);
+ c = ~c;
+ return (unsigned long)c;
+}
+
+/* ========================================================================= */
+#define DOBIG4 c ^= *++buf4; \
+ c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
+ crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
+#define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
+
+/* ========================================================================= */
+local unsigned long crc32_big(crc, buf, len)
+ unsigned long crc;
+ const unsigned char FAR *buf;
+ unsigned len;
+{
+ register u4 c;
+ register const u4 FAR *buf4;
+
+ c = REV((u4)crc);
+ c = ~c;
+ while (len && ((ptrdiff_t)buf & 3)) {
+ c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
+ len--;
+ }
+
+ buf4 = (const u4 FAR *)(const void FAR *)buf;
+ buf4--;
+ while (len >= 32) {
+ DOBIG32;
+ len -= 32;
+ }
+ while (len >= 4) {
+ DOBIG4;
+ len -= 4;
+ }
+ buf4++;
+ buf = (const unsigned char FAR *)buf4;
+
+ if (len) do {
+ c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
+ } while (--len);
+ c = ~c;
+ return (unsigned long)(REV(c));
+}
+
+#endif /* BYFOUR */
+
+#define GF2_DIM 32 /* dimension of GF(2) vectors (length of CRC) */
+
+/* ========================================================================= */
+local unsigned long gf2_matrix_times(mat, vec)
+ unsigned long *mat;
+ unsigned long vec;
+{
+ unsigned long sum;
+
+ sum = 0;
+ while (vec) {
+ if (vec & 1)
+ sum ^= *mat;
+ vec >>= 1;
+ mat++;
+ }
+ return sum;
+}
+
+/* ========================================================================= */
+local void gf2_matrix_square(square, mat)
+ unsigned long *square;
+ unsigned long *mat;
+{
+ int n;
+
+ for (n = 0; n < GF2_DIM; n++)
+ square[n] = gf2_matrix_times(mat, mat[n]);
+}
+
+/* ========================================================================= */
+local uLong crc32_combine_(crc1, crc2, len2)
+ uLong crc1;
+ uLong crc2;
+ z_off64_t len2;
+{
+ int n;
+ unsigned long row;
+ unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */
+ unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */
+
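+    /* Strategy: feeding one zero bit into the CRC register is a linear
+       map over GF(2), so it can be written as a 32x32 bit matrix.
+       Squaring a matrix doubles the number of zero bits it applies;
+       after three squarings the operator accounts for one zero byte,
+       and the loop below then applies one operator per set bit of
+       len2, i.e. O(log len2) work instead of feeding len2 zero bytes
+       through the register. */
+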
+ /* degenerate case (also disallow negative lengths) */
+ if (len2 <= 0)
+ return crc1;
+
+ /* put operator for one zero bit in odd */
+ odd[0] = 0xedb88320UL; /* CRC-32 polynomial */
+ row = 1;
+ for (n = 1; n < GF2_DIM; n++) {
+ odd[n] = row;
+ row <<= 1;
+ }
+
+ /* put operator for two zero bits in even */
+ gf2_matrix_square(even, odd);
+
+ /* put operator for four zero bits in odd */
+ gf2_matrix_square(odd, even);
+
+ /* apply len2 zeros to crc1 (first square will put the operator for one
+ zero byte, eight zero bits, in even) */
+ do {
+ /* apply zeros operator for this bit of len2 */
+ gf2_matrix_square(even, odd);
+ if (len2 & 1)
+ crc1 = gf2_matrix_times(even, crc1);
+ len2 >>= 1;
+
+ /* if no more bits set, then done */
+ if (len2 == 0)
+ break;
+
+ /* another iteration of the loop with odd and even swapped */
+ gf2_matrix_square(odd, even);
+ if (len2 & 1)
+ crc1 = gf2_matrix_times(odd, crc1);
+ len2 >>= 1;
+
+ /* if no more bits set, then done */
+ } while (len2 != 0);
+
+ /* return combined crc */
+ crc1 ^= crc2;
+ return crc1;
+}
+
+/* ========================================================================= */
+uLong ZEXPORT crc32_combine(crc1, crc2, len2)
+ uLong crc1;
+ uLong crc2;
+ z_off_t len2;
+{
+ return crc32_combine_(crc1, crc2, len2);
+}
+
+uLong ZEXPORT crc32_combine64(crc1, crc2, len2)
+ uLong crc1;
+ uLong crc2;
+ z_off64_t len2;
+{
+ return crc32_combine_(crc1, crc2, len2);
+}
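+
+/*
+   Usage sketch: if crcA = crc32(0L, bufA, lenA) and
+   crcB = crc32(0L, bufB, lenB), then crc32_combine(crcA, crcB, lenB)
+   equals the CRC-32 of bufA followed by bufB, without reading the
+   data again.
+*/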
--- /dev/null
+/* crc32.h -- tables for rapid CRC calculation
+ * Generated automatically by crc32.c
+ */
+
+local const unsigned long FAR crc_table[TBLS][256] =
+{
+ {
+ 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
+ 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
+ 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
+ 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
+ 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
+ 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
+ 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
+ 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
+ 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
+ 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
+ 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
+ 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
+ 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
+ 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
+ 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
+ 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
+ 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
+ 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
+ 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
+ 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
+ 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
+ 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
+ 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
+ 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
+ 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
+ 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
+ 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
+ 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
+ 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
+ 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
+ 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
+ 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
+ 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
+ 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
+ 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
+ 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
+ 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
+ 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
+ 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
+ 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
+ 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
+ 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
+ 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
+ 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
+ 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
+ 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
+ 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
+ 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
+ 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
+ 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
+ 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
+ 0x2d02ef8dUL
+#ifdef BYFOUR
+ },
+ {
+ 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL,
+ 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL,
+ 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL,
+ 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL,
+ 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL,
+ 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL,
+ 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL,
+ 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL,
+ 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL,
+ 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL,
+ 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL,
+ 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL,
+ 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL,
+ 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL,
+ 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL,
+ 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL,
+ 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL,
+ 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL,
+ 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL,
+ 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL,
+ 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL,
+ 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL,
+ 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL,
+ 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL,
+ 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL,
+ 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL,
+ 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL,
+ 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL,
+ 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL,
+ 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL,
+ 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL,
+ 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL,
+ 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL,
+ 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL,
+ 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL,
+ 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL,
+ 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL,
+ 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL,
+ 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL,
+ 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL,
+ 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL,
+ 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL,
+ 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL,
+ 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL,
+ 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL,
+ 0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL,
+ 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL,
+ 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL,
+ 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL,
+ 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL,
+ 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL,
+ 0x9324fd72UL
+ },
+ {
+ 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL,
+ 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL,
+ 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL,
+ 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL,
+ 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL,
+ 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL,
+ 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL,
+ 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL,
+ 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL,
+ 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL,
+ 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL,
+ 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL,
+ 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL,
+ 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL,
+ 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL,
+ 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL,
+ 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL,
+ 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL,
+ 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL,
+ 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL,
+ 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL,
+ 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL,
+ 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL,
+ 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL,
+ 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL,
+ 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL,
+ 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL,
+ 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL,
+ 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL,
+ 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL,
+ 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL,
+ 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL,
+ 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL,
+ 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL,
+ 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL,
+ 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL,
+ 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL,
+ 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL,
+ 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL,
+ 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL,
+ 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL,
+ 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL,
+ 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL,
+ 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL,
+ 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL,
+ 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL,
+ 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL,
+ 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL,
+ 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL,
+ 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL,
+ 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL,
+ 0xbe9834edUL
+ },
+ {
+ 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL,
+ 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL,
+ 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL,
+ 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL,
+ 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL,
+ 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL,
+ 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL,
+ 0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL,
+ 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL,
+ 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL,
+ 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL,
+ 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL,
+ 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL,
+ 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL,
+ 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL,
+ 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL,
+ 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL,
+ 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL,
+ 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL,
+ 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL,
+ 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL,
+ 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL,
+ 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL,
+ 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL,
+ 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL,
+ 0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL,
+ 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL,
+ 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL,
+ 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL,
+ 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL,
+ 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL,
+ 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL,
+ 0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL,
+ 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL,
+ 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL,
+ 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL,
+ 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL,
+ 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL,
+ 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL,
+ 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL,
+ 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL,
+ 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL,
+ 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL,
+ 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL,
+ 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL,
+ 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL,
+ 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL,
+ 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL,
+ 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL,
+ 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL,
+ 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL,
+ 0xde0506f1UL
+ },
+ {
+ 0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL,
+ 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL,
+ 0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL,
+ 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL,
+ 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL,
+ 0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL,
+ 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL,
+ 0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL,
+ 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL,
+ 0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL,
+ 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL,
+ 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL,
+ 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL,
+ 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL,
+ 0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL,
+ 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL,
+ 0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL,
+ 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL,
+ 0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL,
+ 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL,
+ 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL,
+ 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL,
+ 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL,
+ 0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL,
+ 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL,
+ 0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL,
+ 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL,
+ 0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL,
+ 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL,
+ 0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL,
+ 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL,
+ 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL,
+ 0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL,
+ 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL,
+ 0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL,
+ 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL,
+ 0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL,
+ 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL,
+ 0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL,
+ 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL,
+ 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL,
+ 0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL,
+ 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL,
+ 0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL,
+ 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL,
+ 0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL,
+ 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL,
+ 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL,
+ 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL,
+ 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL,
+ 0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL,
+ 0x8def022dUL
+ },
+ {
+ 0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL,
+ 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL,
+ 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL,
+ 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL,
+ 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL,
+ 0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL,
+ 0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL,
+ 0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL,
+ 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL,
+ 0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL,
+ 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL,
+ 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL,
+ 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL,
+ 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL,
+ 0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL,
+ 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL,
+ 0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL,
+ 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL,
+ 0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL,
+ 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL,
+ 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL,
+ 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL,
+ 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL,
+ 0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL,
+ 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL,
+ 0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL,
+ 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL,
+ 0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL,
+ 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL,
+ 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL,
+ 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL,
+ 0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL,
+ 0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL,
+ 0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL,
+ 0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL,
+ 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL,
+ 0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL,
+ 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL,
+ 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL,
+ 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL,
+ 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL,
+ 0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL,
+ 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL,
+ 0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL,
+ 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL,
+ 0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL,
+ 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL,
+ 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL,
+ 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL,
+ 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL,
+ 0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL,
+ 0x72fd2493UL
+ },
+ {
+ 0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL,
+ 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL,
+ 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL,
+ 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL,
+ 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL,
+ 0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL,
+ 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL,
+ 0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL,
+ 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL,
+ 0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL,
+ 0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL,
+ 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL,
+ 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL,
+ 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL,
+ 0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL,
+ 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL,
+ 0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL,
+ 0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL,
+ 0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL,
+ 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL,
+ 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL,
+ 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL,
+ 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL,
+ 0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL,
+ 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL,
+ 0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL,
+ 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL,
+ 0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL,
+ 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL,
+ 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL,
+ 0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL,
+ 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL,
+ 0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL,
+ 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL,
+ 0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL,
+ 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL,
+ 0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL,
+ 0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL,
+ 0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL,
+ 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL,
+ 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL,
+ 0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL,
+ 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL,
+ 0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL,
+ 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL,
+ 0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL,
+ 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL,
+ 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL,
+ 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL,
+ 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL,
+ 0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL,
+ 0xed3498beUL
+ },
+ {
+ 0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL,
+ 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL,
+ 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL,
+ 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL,
+ 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL,
+ 0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL,
+ 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL,
+ 0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL,
+ 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL,
+ 0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL,
+ 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL,
+ 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL,
+ 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL,
+ 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL,
+ 0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL,
+ 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL,
+ 0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL,
+ 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL,
+ 0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL,
+ 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL,
+ 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL,
+ 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL,
+ 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL,
+ 0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL,
+ 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL,
+ 0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL,
+ 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL,
+ 0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL,
+ 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL,
+ 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL,
+ 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL,
+ 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL,
+ 0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL,
+ 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL,
+ 0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL,
+ 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL,
+ 0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL,
+ 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL,
+ 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL,
+ 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL,
+ 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL,
+ 0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL,
+ 0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL,
+ 0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL,
+ 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL,
+ 0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL,
+ 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL,
+ 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL,
+ 0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL,
+ 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL,
+ 0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL,
+ 0xf10605deUL
+#endif
+ }
+};
--- /dev/null
+/* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process depends on being able to identify portions
+ * of the input text which are identical to earlier input (within a
+ * sliding window trailing behind the input currently being processed).
+ *
+ * The most straightforward technique turns out to be the fastest for
+ * most input files: try all possible matches and select the longest.
+ * The key feature of this algorithm is that insertions into the string
+ * dictionary are very simple and thus fast, and deletions are avoided
+ * completely. Insertions are performed at each input character, whereas
+ * string matches are performed only when the previous match ends. So it
+ * is preferable to spend more time in matches to allow very fast string
+ * insertions and avoid deletions. The matching algorithm for small
+ * strings is inspired by that of Rabin & Karp. A brute force approach
+ * is used to find longer strings when a small match has been found.
+ * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ * (by Leonid Broukhis).
+ * A previous version of this file used a more sophisticated algorithm
+ * (by Fiala and Greene) which is guaranteed to run in linear amortized
+ * time, but has a larger average cost, uses more memory and is patented.
+ * However the F&G algorithm may be faster for some highly redundant
+ * files if the parameter max_chain_length (described below) is too large.
+ *
+ * ACKNOWLEDGEMENTS
+ *
+ * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ * I found it in 'freeze' written by Leonid Broukhis.
+ * Thanks to many people for bug reports and testing.
+ *
+ * REFERENCES
+ *
+ *      Deutsch, L.P., "DEFLATE Compressed Data Format Specification".
+ *      Available at http://www.ietf.org/rfc/rfc1951.txt
+ *
+ * A description of the Rabin and Karp algorithm is given in the book
+ * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ * Fiala, E.R., and Greene, D.H.
+ * Data Compression with Finite Windows, Comm. ACM, 32, 4 (1989) 490-505
+ *
+ */
+
+#include "deflate.h"
+
+const char deflate_copyright[] =
+ " deflate 1.2.5 Copyright 1995-2010 Jean-loup Gailly and Mark Adler ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+/* ===========================================================================
+ * Function prototypes.
+ */
+typedef enum {
+ need_more, /* block not completed, need more input or more output */
+ block_done, /* block flush performed */
+ finish_started, /* finish started, need only more output at next deflate */
+ finish_done /* finish done, accept no more input or output */
+} block_state;
+
+typedef block_state (*compress_func) OF((deflate_state *s, int flush));
+/* Compression function. Returns the block state after the call. */
+
+local void fill_window OF((deflate_state *s));
+local block_state deflate_stored OF((deflate_state *s, int flush));
+local block_state deflate_fast OF((deflate_state *s, int flush));
+#ifndef FASTEST
+local block_state deflate_slow OF((deflate_state *s, int flush));
+#endif
+local block_state deflate_rle OF((deflate_state *s, int flush));
+local block_state deflate_huff OF((deflate_state *s, int flush));
+local void lm_init OF((deflate_state *s));
+local void putShortMSB OF((deflate_state *s, uInt b));
+local void flush_pending OF((z_streamp strm));
+local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
+#ifdef ASMV
+ void match_init OF((void)); /* asm code initialization */
+ uInt longest_match OF((deflate_state *s, IPos cur_match));
+#else
+local uInt longest_match OF((deflate_state *s, IPos cur_match));
+#endif
+
+#ifdef DEBUG
+local void check_match OF((deflate_state *s, IPos start, IPos match,
+ int length));
+#endif
+
+/* ===========================================================================
+ * Local data
+ */
+
+#define NIL 0
+/* Tail of hash chains */
+
+#ifndef TOO_FAR
+# define TOO_FAR 4096
+#endif
+/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+typedef struct config_s {
+ ush good_length; /* reduce lazy search above this match length */
+ ush max_lazy; /* do not perform lazy search above this match length */
+ ush nice_length; /* quit search above this match length */
+ ush max_chain;
+ compress_func func;
+} config;
+
+#ifdef FASTEST
+local const config configuration_table[2] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */
+#else
+local const config configuration_table[10] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */
+/* 2 */ {4, 5, 16, 8, deflate_fast},
+/* 3 */ {4, 6, 32, 32, deflate_fast},
+
+/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
+/* 5 */ {8, 16, 32, 32, deflate_slow},
+/* 6 */ {8, 16, 128, 128, deflate_slow},
+/* 7 */ {8, 32, 128, 256, deflate_slow},
+/* 8 */ {32, 128, 258, 1024, deflate_slow},
+/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
+#endif
+
+/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
+ * meaning.
+ */
+
+#define EQUAL 0
+/* result of memcmp for equal strings */
+
+#ifndef NO_DUMMY_DECL
+struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
+#endif
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
+ * input characters, so that a running hash key can be computed from the
+ * previous key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
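+
+/* Illustrative numbers (h and p are example names, not part of the
+ * state): with the default memLevel of 8, hash_bits is 15 and
+ * hash_shift is (15+MIN_MATCH-1)/MIN_MATCH = 5. Since
+ * hash_shift*MIN_MATCH >= hash_bits, a byte mixed in more than
+ * MIN_MATCH updates ago has been shifted entirely above hash_mask and
+ * masked away, so the running hash always covers exactly the last
+ * MIN_MATCH input bytes:
+ *
+ *    h = 0;
+ *    UPDATE_HASH(s, h, s->window[p]);
+ *    UPDATE_HASH(s, h, s->window[p+1]);
+ *    UPDATE_HASH(s, h, s->window[p+2]);   h now keys window[p..p+2]
+ */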
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * If this file is compiled with -DFASTEST, the compression level is forced
+ * to 1, and no hash chains are maintained.
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
+ * input characters and the first MIN_MATCH bytes of str are valid
+ * (except for the last MIN_MATCH-1 bytes of the input file).
+ */
+#ifdef FASTEST
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ match_head = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
+#else
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
+#endif
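+
+/* A hedged sketch of how the chains built by INSERT_STRING are walked
+ * (cur and limit are illustrative names; longest_match() below is the
+ * real consumer):
+ *
+ *    IPos cur = s->head[s->ins_h];          most recent string, this hash
+ *    while (cur > limit)
+ *        cur = s->prev[cur & s->w_mask];    next older candidate
+ */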
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+ s->head[s->hash_size-1] = NIL; \
+ zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
+/* ========================================================================= */
+int ZEXPORT deflateInit_(strm, level, version, stream_size)
+ z_streamp strm;
+ int level;
+ const char *version;
+ int stream_size;
+{
+ return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
+ Z_DEFAULT_STRATEGY, version, stream_size);
+ /* To do: ignore strm->next_in if we use it as window */
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+ version, stream_size)
+ z_streamp strm;
+ int level;
+ int method;
+ int windowBits;
+ int memLevel;
+ int strategy;
+ const char *version;
+ int stream_size;
+{
+ deflate_state *s;
+ int wrap = 1;
+ static const char my_version[] = ZLIB_VERSION;
+
+ ushf *overlay;
+ /* We overlay pending_buf and d_buf+l_buf. This works since the average
+ * output size for (length,distance) codes is <= 24 bits.
+ */
+
+ if (version == Z_NULL || version[0] != my_version[0] ||
+ stream_size != sizeof(z_stream)) {
+ return Z_VERSION_ERROR;
+ }
+ if (strm == Z_NULL) return Z_STREAM_ERROR;
+
+ strm->msg = Z_NULL;
+ if (strm->zalloc == (alloc_func)0) {
+ strm->zalloc = zcalloc;
+ strm->opaque = (voidpf)0;
+ }
+ if (strm->zfree == (free_func)0) strm->zfree = zcfree;
+
+#ifdef FASTEST
+ if (level != 0) level = 1;
+#else
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
+#endif
+
+ if (windowBits < 0) { /* suppress zlib wrapper */
+ wrap = 0;
+ windowBits = -windowBits;
+ }
+#ifdef GZIP
+ else if (windowBits > 15) {
+ wrap = 2; /* write gzip wrapper instead */
+ windowBits -= 16;
+ }
+#endif
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
+ windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
+ strategy < 0 || strategy > Z_FIXED) {
+ return Z_STREAM_ERROR;
+ }
+ if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */
+ s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
+ if (s == Z_NULL) return Z_MEM_ERROR;
+ strm->state = (struct internal_state FAR *)s;
+ s->strm = strm;
+
+ s->wrap = wrap;
+ s->gzhead = Z_NULL;
+ s->w_bits = windowBits;
+ s->w_size = 1 << s->w_bits;
+ s->w_mask = s->w_size - 1;
+
+ s->hash_bits = memLevel + 7;
+ s->hash_size = 1 << s->hash_bits;
+ s->hash_mask = s->hash_size - 1;
+ s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+
+ s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
+ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
+ s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
+
+ s->high_water = 0; /* nothing written to s->window yet */
+
+ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
+ s->pending_buf = (uchf *) overlay;
+ s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+
+ if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
+ s->pending_buf == Z_NULL) {
+ s->status = FINISH_STATE;
+ strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
+ deflateEnd (strm);
+ return Z_MEM_ERROR;
+ }
+ s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+ s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+
+ s->level = level;
+ s->strategy = strategy;
+ s->method = (Byte)method;
+
+ return deflateReset(strm);
+}
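+
+/* A minimal usage sketch (illustrative, error checks trimmed) of the
+ * windowBits conventions decoded above, via the deflateInit2() macro
+ * from zlib.h:
+ *
+ *    z_stream strm;
+ *    strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL;
+ *    deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
+ *                 15 + 16, 8, Z_DEFAULT_STRATEGY);    gzip wrapper
+ *
+ * windowBits of -15 would select raw deflate instead, and plain
+ * deflateInit() is the zlib wrapper with windowBits MAX_WBITS and
+ * memLevel DEF_MEM_LEVEL, as deflateInit_() above shows.
+ */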
+
+/* ========================================================================= */
+int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
+ z_streamp strm;
+ const Bytef *dictionary;
+ uInt dictLength;
+{
+ deflate_state *s;
+ uInt length = dictLength;
+ uInt n;
+ IPos hash_head = 0;
+
+ if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL ||
+ strm->state->wrap == 2 ||
+ (strm->state->wrap == 1 && strm->state->status != INIT_STATE))
+ return Z_STREAM_ERROR;
+
+ s = strm->state;
+ if (s->wrap)
+ strm->adler = adler32(strm->adler, dictionary, dictLength);
+
+ if (length < MIN_MATCH) return Z_OK;
+ if (length > s->w_size) {
+ length = s->w_size;
+ dictionary += dictLength - length; /* use the tail of the dictionary */
+ }
+ zmemcpy(s->window, dictionary, length);
+ s->strstart = length;
+ s->block_start = (long)length;
+
+ /* Insert all strings in the hash table (except for the last two bytes).
+ * s->lookahead stays null, so s->ins_h will be recomputed at the next
+ * call of fill_window.
+ */
+ s->ins_h = s->window[0];
+ UPDATE_HASH(s, s->ins_h, s->window[1]);
+ for (n = 0; n <= length - MIN_MATCH; n++) {
+ INSERT_STRING(s, n, hash_head);
+ }
+ if (hash_head) hash_head = 0; /* to make compiler happy */
+ return Z_OK;
+}
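+
+/* Illustrative use of a preset dictionary (dict is an example name):
+ * the decompressor must pass the same bytes to inflateSetDictionary(),
+ * identified by the dictionary's adler32 that deflate() writes into
+ * the zlib header when strstart != 0:
+ *
+ *    static const Bytef dict[] = "the quick brown fox";
+ *    deflateInit(&strm, Z_BEST_COMPRESSION);
+ *    deflateSetDictionary(&strm, dict, sizeof(dict) - 1);
+ */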
+
+/* ========================================================================= */
+int ZEXPORT deflateReset (strm)
+ z_streamp strm;
+{
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL ||
+ strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) {
+ return Z_STREAM_ERROR;
+ }
+
+ strm->total_in = strm->total_out = 0;
+ strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
+ strm->data_type = Z_UNKNOWN;
+
+ s = (deflate_state *)strm->state;
+ s->pending = 0;
+ s->pending_out = s->pending_buf;
+
+ if (s->wrap < 0) {
+ s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
+ }
+ s->status = s->wrap ? INIT_STATE : BUSY_STATE;
+ strm->adler =
+#ifdef GZIP
+ s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
+#endif
+ adler32(0L, Z_NULL, 0);
+ s->last_flush = Z_NO_FLUSH;
+
+ _tr_init(s);
+ lm_init(s);
+
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateSetHeader (strm, head)
+ z_streamp strm;
+ gz_headerp head;
+{
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ if (strm->state->wrap != 2) return Z_STREAM_ERROR;
+ strm->state->gzhead = head;
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflatePrime (strm, bits, value)
+ z_streamp strm;
+ int bits;
+ int value;
+{
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ strm->state->bi_valid = bits;
+ strm->state->bi_buf = (ush)(value & ((1 << bits) - 1));
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateParams(strm, level, strategy)
+ z_streamp strm;
+ int level;
+ int strategy;
+{
+ deflate_state *s;
+ compress_func func;
+ int err = Z_OK;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ s = strm->state;
+
+#ifdef FASTEST
+ if (level != 0) level = 1;
+#else
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
+#endif
+ if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
+ return Z_STREAM_ERROR;
+ }
+ func = configuration_table[s->level].func;
+
+ if ((strategy != s->strategy || func != configuration_table[level].func) &&
+ strm->total_in != 0) {
+ /* Flush the last buffer: */
+ err = deflate(strm, Z_BLOCK);
+ }
+ if (s->level != level) {
+ s->level = level;
+ s->max_lazy_match = configuration_table[level].max_lazy;
+ s->good_match = configuration_table[level].good_length;
+ s->nice_match = configuration_table[level].nice_length;
+ s->max_chain_length = configuration_table[level].max_chain;
+ }
+ s->strategy = strategy;
+ return err;
+}
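+
+/* A hedged sketch of switching parameters mid-stream (the levels are
+ * illustrative). The internal deflate(strm, Z_BLOCK) above flushes the
+ * last buffer, so the caller must keep avail_out large enough for it:
+ *
+ *    deflateInit(&strm, 1);
+ *    deflate(&strm, Z_NO_FLUSH);            fast pass over some input
+ *    deflateParams(&strm, 9, Z_DEFAULT_STRATEGY);
+ *    deflate(&strm, Z_FINISH);              rest at maximum compression
+ */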
+
+/* ========================================================================= */
+int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
+ z_streamp strm;
+ int good_length;
+ int max_lazy;
+ int nice_length;
+ int max_chain;
+{
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ s = strm->state;
+ s->good_match = good_length;
+ s->max_lazy_match = max_lazy;
+ s->nice_match = nice_length;
+ s->max_chain_length = max_chain;
+ return Z_OK;
+}
+
+/* =========================================================================
+ * For the default windowBits of 15 and memLevel of 8, this function returns
+ * a close-to-exact, as well as small, upper bound on the compressed size.
+ * Those defaults are coded as constants here for a reason: if the
+ * #defines are changed, then this function needs to be changed as well.
+ * The return value for 15 and 8 only works for those exact settings.
+ *
+ * For any setting other than those defaults for windowBits and memLevel,
+ * the value returned is a conservative worst case for the maximum expansion
+ * resulting from using fixed blocks instead of stored blocks, which deflate
+ * can emit on compressed data for some combinations of the parameters.
+ *
+ * This function could be more sophisticated to provide closer upper bounds for
+ * every combination of windowBits and memLevel. But even the conservative
+ * upper bound of about 14% expansion does not seem onerous for output buffer
+ * allocation.
+ */
+uLong ZEXPORT deflateBound(strm, sourceLen)
+ z_streamp strm;
+ uLong sourceLen;
+{
+ deflate_state *s;
+ uLong complen, wraplen;
+ Bytef *str;
+
+ /* conservative upper bound for compressed data */
+ complen = sourceLen +
+ ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;
+
+ /* if can't get parameters, return conservative bound plus zlib wrapper */
+ if (strm == Z_NULL || strm->state == Z_NULL)
+ return complen + 6;
+
+ /* compute wrapper length */
+ s = strm->state;
+ switch (s->wrap) {
+ case 0: /* raw deflate */
+ wraplen = 0;
+ break;
+ case 1: /* zlib wrapper */
+ wraplen = 6 + (s->strstart ? 4 : 0);
+ break;
+ case 2: /* gzip wrapper */
+ wraplen = 18;
+ if (s->gzhead != Z_NULL) { /* user-supplied gzip header */
+ if (s->gzhead->extra != Z_NULL)
+ wraplen += 2 + s->gzhead->extra_len;
+ str = s->gzhead->name;
+ if (str != Z_NULL)
+ do {
+ wraplen++;
+ } while (*str++);
+ str = s->gzhead->comment;
+ if (str != Z_NULL)
+ do {
+ wraplen++;
+ } while (*str++);
+ if (s->gzhead->hcrc)
+ wraplen += 2;
+ }
+ break;
+ default: /* for compiler happiness */
+ wraplen = 6;
+ }
+
+ /* if not default parameters, return conservative bound */
+ if (s->w_bits != 15 || s->hash_bits != 8 + 7)
+ return complen + wraplen;
+
+ /* default settings: return tight bound for that case */
+ return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
+ (sourceLen >> 25) + 13 - 6 + wraplen;
+}
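+
+/* Sketch of the intended use (single-call compression into a buffer
+ * sized up front; source, sourceLen and out are illustrative names):
+ *
+ *    uLong bound = deflateBound(&strm, sourceLen);
+ *    Bytef *out = (Bytef *)malloc(bound);
+ *    strm.next_in = source;   strm.avail_in = (uInt)sourceLen;
+ *    strm.next_out = out;     strm.avail_out = (uInt)bound;
+ *    deflate(&strm, Z_FINISH);     expected to return Z_STREAM_END
+ */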
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+local void putShortMSB (s, b)
+ deflate_state *s;
+ uInt b;
+{
+ put_byte(s, (Byte)(b >> 8));
+ put_byte(s, (Byte)(b & 0xff));
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+local void flush_pending(strm)
+ z_streamp strm;
+{
+ unsigned len = strm->state->pending;
+
+ if (len > strm->avail_out) len = strm->avail_out;
+ if (len == 0) return;
+
+ zmemcpy(strm->next_out, strm->state->pending_out, len);
+ strm->next_out += len;
+ strm->state->pending_out += len;
+ strm->total_out += len;
+ strm->avail_out -= len;
+ strm->state->pending -= len;
+ if (strm->state->pending == 0) {
+ strm->state->pending_out = strm->state->pending_buf;
+ }
+}
+
+/* ========================================================================= */
+int ZEXPORT deflate (strm, flush)
+ z_streamp strm;
+ int flush;
+{
+ int old_flush; /* value of flush param for previous deflate call */
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL ||
+ flush > Z_BLOCK || flush < 0) {
+ return Z_STREAM_ERROR;
+ }
+ s = strm->state;
+
+ if (strm->next_out == Z_NULL ||
+ (strm->next_in == Z_NULL && strm->avail_in != 0) ||
+ (s->status == FINISH_STATE && flush != Z_FINISH)) {
+ ERR_RETURN(strm, Z_STREAM_ERROR);
+ }
+ if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
+
+ s->strm = strm; /* just in case */
+ old_flush = s->last_flush;
+ s->last_flush = flush;
+
+ /* Write the header */
+ if (s->status == INIT_STATE) {
+#ifdef GZIP
+ if (s->wrap == 2) {
+ strm->adler = crc32(0L, Z_NULL, 0);
+ put_byte(s, 31);
+ put_byte(s, 139);
+ put_byte(s, 8);
+ if (s->gzhead == Z_NULL) {
+ put_byte(s, 0);
+ put_byte(s, 0);
+ put_byte(s, 0);
+ put_byte(s, 0);
+ put_byte(s, 0);
+ put_byte(s, s->level == 9 ? 2 :
+ (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
+ 4 : 0));
+ put_byte(s, OS_CODE);
+ s->status = BUSY_STATE;
+ }
+ else {
+ put_byte(s, (s->gzhead->text ? 1 : 0) +
+ (s->gzhead->hcrc ? 2 : 0) +
+ (s->gzhead->extra == Z_NULL ? 0 : 4) +
+ (s->gzhead->name == Z_NULL ? 0 : 8) +
+ (s->gzhead->comment == Z_NULL ? 0 : 16)
+ );
+ put_byte(s, (Byte)(s->gzhead->time & 0xff));
+ put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
+ put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
+ put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
+ put_byte(s, s->level == 9 ? 2 :
+ (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
+ 4 : 0));
+ put_byte(s, s->gzhead->os & 0xff);
+ if (s->gzhead->extra != Z_NULL) {
+ put_byte(s, s->gzhead->extra_len & 0xff);
+ put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
+ }
+ if (s->gzhead->hcrc)
+ strm->adler = crc32(strm->adler, s->pending_buf,
+ s->pending);
+ s->gzindex = 0;
+ s->status = EXTRA_STATE;
+ }
+ }
+ else
+#endif
+ {
+ uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+ uInt level_flags;
+
+ if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
+ level_flags = 0;
+ else if (s->level < 6)
+ level_flags = 1;
+ else if (s->level == 6)
+ level_flags = 2;
+ else
+ level_flags = 3;
+ header |= (level_flags << 6);
+ if (s->strstart != 0) header |= PRESET_DICT;
+ header += 31 - (header % 31);
+
+ s->status = BUSY_STATE;
+ putShortMSB(s, header);
+
+ /* Save the adler32 of the preset dictionary: */
+ if (s->strstart != 0) {
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ }
+ strm->adler = adler32(0L, Z_NULL, 0);
+ }
+ }
+#ifdef GZIP
+ if (s->status == EXTRA_STATE) {
+ if (s->gzhead->extra != Z_NULL) {
+ uInt beg = s->pending; /* start of bytes to update crc */
+
+ while (s->gzindex < (s->gzhead->extra_len & 0xffff)) {
+ if (s->pending == s->pending_buf_size) {
+ if (s->gzhead->hcrc && s->pending > beg)
+ strm->adler = crc32(strm->adler, s->pending_buf + beg,
+ s->pending - beg);
+ flush_pending(strm);
+ beg = s->pending;
+ if (s->pending == s->pending_buf_size)
+ break;
+ }
+ put_byte(s, s->gzhead->extra[s->gzindex]);
+ s->gzindex++;
+ }
+ if (s->gzhead->hcrc && s->pending > beg)
+ strm->adler = crc32(strm->adler, s->pending_buf + beg,
+ s->pending - beg);
+ if (s->gzindex == s->gzhead->extra_len) {
+ s->gzindex = 0;
+ s->status = NAME_STATE;
+ }
+ }
+ else
+ s->status = NAME_STATE;
+ }
+ if (s->status == NAME_STATE) {
+ if (s->gzhead->name != Z_NULL) {
+ uInt beg = s->pending; /* start of bytes to update crc */
+ int val;
+
+ do {
+ if (s->pending == s->pending_buf_size) {
+ if (s->gzhead->hcrc && s->pending > beg)
+ strm->adler = crc32(strm->adler, s->pending_buf + beg,
+ s->pending - beg);
+ flush_pending(strm);
+ beg = s->pending;
+ if (s->pending == s->pending_buf_size) {
+ val = 1;
+ break;
+ }
+ }
+ val = s->gzhead->name[s->gzindex++];
+ put_byte(s, val);
+ } while (val != 0);
+ if (s->gzhead->hcrc && s->pending > beg)
+ strm->adler = crc32(strm->adler, s->pending_buf + beg,
+ s->pending - beg);
+ if (val == 0) {
+ s->gzindex = 0;
+ s->status = COMMENT_STATE;
+ }
+ }
+ else
+ s->status = COMMENT_STATE;
+ }
+ if (s->status == COMMENT_STATE) {
+ if (s->gzhead->comment != Z_NULL) {
+ uInt beg = s->pending; /* start of bytes to update crc */
+ int val;
+
+ do {
+ if (s->pending == s->pending_buf_size) {
+ if (s->gzhead->hcrc && s->pending > beg)
+ strm->adler = crc32(strm->adler, s->pending_buf + beg,
+ s->pending - beg);
+ flush_pending(strm);
+ beg = s->pending;
+ if (s->pending == s->pending_buf_size) {
+ val = 1;
+ break;
+ }
+ }
+ val = s->gzhead->comment[s->gzindex++];
+ put_byte(s, val);
+ } while (val != 0);
+ if (s->gzhead->hcrc && s->pending > beg)
+ strm->adler = crc32(strm->adler, s->pending_buf + beg,
+ s->pending - beg);
+ if (val == 0)
+ s->status = HCRC_STATE;
+ }
+ else
+ s->status = HCRC_STATE;
+ }
+ if (s->status == HCRC_STATE) {
+ if (s->gzhead->hcrc) {
+ if (s->pending + 2 > s->pending_buf_size)
+ flush_pending(strm);
+ if (s->pending + 2 <= s->pending_buf_size) {
+ put_byte(s, (Byte)(strm->adler & 0xff));
+ put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
+ strm->adler = crc32(0L, Z_NULL, 0);
+ s->status = BUSY_STATE;
+ }
+ }
+ else
+ s->status = BUSY_STATE;
+ }
+#endif
+
+ /* Flush as much pending output as possible */
+ if (s->pending != 0) {
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ /* Since avail_out is 0, deflate will be called again with
+ * more output space, but possibly with both pending and
+ * avail_in equal to zero. There won't be anything to do,
+ * but this is not an error situation so make sure we
+ * return OK instead of BUF_ERROR at next call of deflate:
+ */
+ s->last_flush = -1;
+ return Z_OK;
+ }
+
+ /* Make sure there is something to do and avoid duplicate consecutive
+ * flushes. For repeated and useless calls with Z_FINISH, we keep
+ * returning Z_STREAM_END instead of Z_BUF_ERROR.
+ */
+ } else if (strm->avail_in == 0 && flush <= old_flush &&
+ flush != Z_FINISH) {
+ ERR_RETURN(strm, Z_BUF_ERROR);
+ }
+
+ /* User must not provide more input after the first FINISH: */
+ if (s->status == FINISH_STATE && strm->avail_in != 0) {
+ ERR_RETURN(strm, Z_BUF_ERROR);
+ }
+
+ /* Start a new block or continue the current one.
+ */
+ if (strm->avail_in != 0 || s->lookahead != 0 ||
+ (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
+ block_state bstate;
+
+ bstate = s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
+ (s->strategy == Z_RLE ? deflate_rle(s, flush) :
+ (*(configuration_table[s->level].func))(s, flush));
+
+ if (bstate == finish_started || bstate == finish_done) {
+ s->status = FINISH_STATE;
+ }
+ if (bstate == need_more || bstate == finish_started) {
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
+ }
+ return Z_OK;
+ /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
+ * of deflate should use the same flush parameter to make sure
+ * that the flush is complete. So we don't have to output an
+ * empty block here, this will be done at next call. This also
+ * ensures that for a very small output buffer, we emit at most
+ * one empty block.
+ */
+ }
+ if (bstate == block_done) {
+ if (flush == Z_PARTIAL_FLUSH) {
+ _tr_align(s);
+ } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
+ _tr_stored_block(s, (char*)0, 0L, 0);
+ /* For a full flush, this empty block will be recognized
+ * as a special marker by inflate_sync().
+ */
+ if (flush == Z_FULL_FLUSH) {
+ CLEAR_HASH(s); /* forget history */
+ if (s->lookahead == 0) {
+ s->strstart = 0;
+ s->block_start = 0L;
+ }
+ }
+ }
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
+ return Z_OK;
+ }
+ }
+ }
+ Assert(strm->avail_out > 0, "bug2");
+
+ if (flush != Z_FINISH) return Z_OK;
+ if (s->wrap <= 0) return Z_STREAM_END;
+
+ /* Write the trailer */
+#ifdef GZIP
+ if (s->wrap == 2) {
+ put_byte(s, (Byte)(strm->adler & 0xff));
+ put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
+ put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
+ put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
+ put_byte(s, (Byte)(strm->total_in & 0xff));
+ put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
+ put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
+ put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
+ }
+ else
+#endif
+ {
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ }
+ flush_pending(strm);
+ /* If avail_out is zero, the application will call deflate again
+ * to flush the rest.
+ */
+ if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
+ return s->pending != 0 ? Z_OK : Z_STREAM_END;
+}
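+
+/* The canonical loop driving the state machine above, as a hedged
+ * sketch in the spirit of zlib's zpipe.c example (CHUNK, src and dst
+ * are illustrative; error checks trimmed):
+ *
+ *    unsigned char in[CHUNK], out[CHUNK];
+ *    int flush;
+ *    do {
+ *        strm.avail_in = fread(in, 1, CHUNK, src);
+ *        flush = feof(src) ? Z_FINISH : Z_NO_FLUSH;
+ *        strm.next_in = in;
+ *        do {
+ *            strm.avail_out = CHUNK;
+ *            strm.next_out = out;
+ *            deflate(&strm, flush);
+ *            fwrite(out, 1, CHUNK - strm.avail_out, dst);
+ *        } while (strm.avail_out == 0);
+ *    } while (flush != Z_FINISH);
+ *    deflateEnd(&strm);
+ */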
+
+/* ========================================================================= */
+int ZEXPORT deflateEnd (strm)
+ z_streamp strm;
+{
+ int status;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+
+ status = strm->state->status;
+ if (status != INIT_STATE &&
+ status != EXTRA_STATE &&
+ status != NAME_STATE &&
+ status != COMMENT_STATE &&
+ status != HCRC_STATE &&
+ status != BUSY_STATE &&
+ status != FINISH_STATE) {
+ return Z_STREAM_ERROR;
+ }
+
+ /* Deallocate in reverse order of allocations: */
+ TRY_FREE(strm, strm->state->pending_buf);
+ TRY_FREE(strm, strm->state->head);
+ TRY_FREE(strm, strm->state->prev);
+ TRY_FREE(strm, strm->state->window);
+
+ ZFREE(strm, strm->state);
+ strm->state = Z_NULL;
+
+ return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+}
+
+/* =========================================================================
+ * Copy the source state to the destination state.
+ * To simplify the source, this is not supported for 16-bit MSDOS (which
+ * doesn't have enough memory anyway to duplicate compression states).
+ */
+int ZEXPORT deflateCopy (dest, source)
+ z_streamp dest;
+ z_streamp source;
+{
+#ifdef MAXSEG_64K
+ return Z_STREAM_ERROR;
+#else
+ deflate_state *ds;
+ deflate_state *ss;
+ ushf *overlay;
+
+
+ if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
+ return Z_STREAM_ERROR;
+ }
+
+ ss = source->state;
+
+ zmemcpy(dest, source, sizeof(z_stream));
+
+ ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
+ if (ds == Z_NULL) return Z_MEM_ERROR;
+ dest->state = (struct internal_state FAR *) ds;
+ zmemcpy(ds, ss, sizeof(deflate_state));
+ ds->strm = dest;
+
+ ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
+ ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
+ ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
+ overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
+ ds->pending_buf = (uchf *) overlay;
+
+ if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
+ ds->pending_buf == Z_NULL) {
+ deflateEnd (dest);
+ return Z_MEM_ERROR;
+ }
+ /* the following zmemcpy calls do not work for 16-bit MSDOS */
+ zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
+ zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
+ zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
+ zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
+
+ ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
+ ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
+ ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+
+ ds->l_desc.dyn_tree = ds->dyn_ltree;
+ ds->d_desc.dyn_tree = ds->dyn_dtree;
+ ds->bl_desc.dyn_tree = ds->bl_tree;
+
+ return Z_OK;
+#endif /* MAXSEG_64K */
+}
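+
+/* Illustrative use (alt is an example name): clone the state to finish
+ * the same compressed prefix two different ways without recompressing:
+ *
+ *    z_stream alt;
+ *    deflateCopy(&alt, &strm);    copies state, window and pending output
+ *    deflate(&strm, Z_FINISH);    finish the original
+ *    deflate(&alt, Z_FINISH);     finish the copy independently
+ *    deflateEnd(&alt);
+ */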
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read. All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
+local int read_buf(strm, buf, size)
+ z_streamp strm;
+ Bytef *buf;
+ unsigned size;
+{
+ unsigned len = strm->avail_in;
+
+ if (len > size) len = size;
+ if (len == 0) return 0;
+
+ strm->avail_in -= len;
+
+ if (strm->state->wrap == 1) {
+ strm->adler = adler32(strm->adler, strm->next_in, len);
+ }
+#ifdef GZIP
+ else if (strm->state->wrap == 2) {
+ strm->adler = crc32(strm->adler, strm->next_in, len);
+ }
+#endif
+ zmemcpy(buf, strm->next_in, len);
+ strm->next_in += len;
+ strm->total_in += len;
+
+ return (int)len;
+}
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+local void lm_init (s)
+ deflate_state *s;
+{
+ s->window_size = (ulg)2L*s->w_size;
+
+ CLEAR_HASH(s);
+
+ /* Set the default configuration parameters:
+ */
+ s->max_lazy_match = configuration_table[s->level].max_lazy;
+ s->good_match = configuration_table[s->level].good_length;
+ s->nice_match = configuration_table[s->level].nice_length;
+ s->max_chain_length = configuration_table[s->level].max_chain;
+
+ s->strstart = 0;
+ s->block_start = 0L;
+ s->lookahead = 0;
+ s->match_length = s->prev_length = MIN_MATCH-1;
+ s->match_available = 0;
+ s->ins_h = 0;
+#ifndef FASTEST
+#ifdef ASMV
+ match_init(); /* initialize the asm code */
+#endif
+#endif
+}
+
+#ifndef FASTEST
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+#ifndef ASMV
+/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
+ * match.S. The code will be functionally equivalent.
+ */
+local uInt longest_match(s, cur_match)
+ deflate_state *s;
+ IPos cur_match; /* current match */
+{
+ unsigned chain_length = s->max_chain_length;/* max hash chain length */
+ register Bytef *scan = s->window + s->strstart; /* current string */
+ register Bytef *match; /* matched string */
+ register int len; /* length of current match */
+ int best_len = s->prev_length; /* best match length so far */
+ int nice_match = s->nice_match; /* stop if match long enough */
+ IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
+ s->strstart - (IPos)MAX_DIST(s) : NIL;
+ /* Stop when cur_match becomes <= limit. To simplify the code,
+ * we prevent matches with the string of window index 0.
+ */
+ Posf *prev = s->prev;
+ uInt wmask = s->w_mask;
+
+#ifdef UNALIGNED_OK
+ /* Compare two bytes at a time. Note: this is not always beneficial.
+ * Try with and without -DUNALIGNED_OK to check.
+ */
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
+ register ush scan_start = *(ushf*)scan;
+ register ush scan_end = *(ushf*)(scan+best_len-1);
+#else
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH;
+ register Byte scan_end1 = scan[best_len-1];
+ register Byte scan_end = scan[best_len];
+#endif
+
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 a multiple of 16.
+ * It is easy to get rid of this optimization if necessary.
+ */
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+ /* Do not waste too much time if we already have a good match: */
+ if (s->prev_length >= s->good_match) {
+ chain_length >>= 2;
+ }
+ /* Do not look for matches beyond the end of the input. This is necessary
+ * to make deflate deterministic.
+ */
+ if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
+
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+ do {
+ Assert(cur_match < s->strstart, "no future");
+ match = s->window + cur_match;
+
+ /* Skip to next match if the match length cannot increase
+ * or if the match length is less than 2. Note that the checks below
+ * for insufficient lookahead only occur occasionally for performance
+ * reasons. Therefore uninitialized memory will be accessed, and
+ * conditional jumps will be made that depend on those values.
+ * However the length of the match is limited to the lookahead, so
+ * the output of deflate is not affected by the uninitialized values.
+ */
+#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
+ /* This code assumes sizeof(unsigned short) == 2. Do not use
+ * UNALIGNED_OK if your compiler uses a different size.
+ */
+ if (*(ushf*)(match+best_len-1) != scan_end ||
+ *(ushf*)match != scan_start) continue;
+
+ /* It is not necessary to compare scan[2] and match[2] since they are
+ * always equal when the other bytes match, given that the hash keys
+ * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
+ * strstart+3, +5, ... up to strstart+257. We check for insufficient
+ * lookahead only every 4th comparison; the 128th check will be made
+ * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+ * necessary to put more guard bytes at the end of the window, or
+ * to check more often for insufficient lookahead.
+ */
+ Assert(scan[2] == match[2], "scan[2]?");
+ scan++, match++;
+ do {
+ } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ scan < strend);
+ /* The funny "do {}" generates better code on most compilers */
+
+ /* Here, scan <= window+strstart+257 */
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ if (*scan == *match) scan++;
+
+ len = (MAX_MATCH - 1) - (int)(strend-scan);
+ scan = strend - (MAX_MATCH-1);
+
+#else /* UNALIGNED_OK */
+
+ if (match[best_len] != scan_end ||
+ match[best_len-1] != scan_end1 ||
+ *match != *scan ||
+ *++match != scan[1]) continue;
+
+ /* The check at best_len-1 can be removed because it will be made
+ * again later. (This heuristic is not always a win.)
+ * It is not necessary to compare scan[2] and match[2] since they
+ * are always equal when the other bytes match, given that
+ * the hash keys are equal and that HASH_BITS >= 8.
+ */
+ scan += 2, match++;
+ Assert(*scan == *match, "match[2]?");
+
+ /* We check for insufficient lookahead only every 8th comparison;
+ * the 256th check will be made at strstart+258.
+ */
+ do {
+ } while (*++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ scan < strend);
+
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+ len = MAX_MATCH - (int)(strend - scan);
+ scan = strend - MAX_MATCH;
+
+#endif /* UNALIGNED_OK */
+
+ if (len > best_len) {
+ s->match_start = cur_match;
+ best_len = len;
+ if (len >= nice_match) break;
+#ifdef UNALIGNED_OK
+ scan_end = *(ushf*)(scan+best_len-1);
+#else
+ scan_end1 = scan[best_len-1];
+ scan_end = scan[best_len];
+#endif
+ }
+ } while ((cur_match = prev[cur_match & wmask]) > limit
+ && --chain_length != 0);
+
+ if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
+ return s->lookahead;
+}
+#endif /* ASMV */
+
+#else /* FASTEST */
+
+/* ---------------------------------------------------------------------------
+ * Optimized version for FASTEST only
+ */
+local uInt longest_match(s, cur_match)
+ deflate_state *s;
+ IPos cur_match; /* current match */
+{
+ register Bytef *scan = s->window + s->strstart; /* current string */
+ register Bytef *match; /* matched string */
+ register int len; /* length of current match */
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH;
+
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 a multiple of 16.
+ * It is easy to get rid of this optimization if necessary.
+ */
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+ Assert(cur_match < s->strstart, "no future");
+
+ match = s->window + cur_match;
+
+ /* Return failure if the match length is less than 2:
+ */
+ if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
+
+ /* The check at best_len-1 can be removed because it will be made
+ * again later. (This heuristic is not always a win.)
+ * It is not necessary to compare scan[2] and match[2] since they
+ * are always equal when the other bytes match, given that
+ * the hash keys are equal and that HASH_BITS >= 8.
+ */
+ scan += 2, match += 2;
+ Assert(*scan == *match, "match[2]?");
+
+ /* We check for insufficient lookahead only every 8th comparison;
+ * the 256th check will be made at strstart+258.
+ */
+ do {
+ } while (*++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ scan < strend);
+
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+ len = MAX_MATCH - (int)(strend - scan);
+
+ if (len < MIN_MATCH) return MIN_MATCH - 1;
+
+ s->match_start = cur_match;
+ return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
+}
+
+#endif /* FASTEST */
+
+#ifdef DEBUG
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+local void check_match(s, start, match, length)
+ deflate_state *s;
+ IPos start, match;
+ int length;
+{
+ /* check that the match is indeed a match */
+ if (zmemcmp(s->window + match,
+ s->window + start, length) != EQUAL) {
+ fprintf(stderr, " start %u, match %u, length %d\n",
+ start, match, length);
+ do {
+ fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
+ } while (--length != 0);
+ z_error("invalid match");
+ }
+ if (z_verbose > 1) {
+ fprintf(stderr,"\\[%d,%d]", start-match, length);
+ do { putc(s->window[start++], stderr); } while (--length != 0);
+ }
+}
+#else
+# define check_match(s, start, match, length)
+#endif /* DEBUG */
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
+local void fill_window(s)
+ deflate_state *s;
+{
+ register unsigned n, m;
+ register Posf *p;
+ unsigned more; /* Amount of free space at the end of the window. */
+ uInt wsize = s->w_size;
+
+ do {
+ more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+ /* Deal with !@#$% 64K limit: */
+ if (sizeof(int) <= 2) {
+ if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+ more = wsize;
+
+ } else if (more == (unsigned)(-1)) {
+ /* Very unlikely, but possible on a 16-bit machine if
+ * strstart == 0 && lookahead == 1 (input done one byte at a time)
+ */
+ more--;
+ }
+ }
+
+ /* If the window is almost full and there is insufficient lookahead,
+ * move the upper half to the lower one to make room in the upper half.
+ */
+ if (s->strstart >= wsize+MAX_DIST(s)) {
+
+ zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
+ s->match_start -= wsize;
+ s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
+ s->block_start -= (long) wsize;
+
+ /* Slide the hash table (could be avoided with 32 bit values
+ at the expense of memory usage). We slide even when level == 0
+ to keep the hash table consistent if we switch back to level > 0
+ later. (Using level 0 permanently is not an optimal usage of
+ zlib, so we don't care about this pathological case.)
+ */
+ n = s->hash_size;
+ p = &s->head[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ } while (--n);
+
+ n = wsize;
+#ifndef FASTEST
+ p = &s->prev[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ /* If n is not on any hash chain, prev[n] is garbage but
+ * its value will never be used.
+ */
+ } while (--n);
+#endif
+ more += wsize;
+ }
+ if (s->strm->avail_in == 0) return;
+
+ /* If there was no sliding:
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ * more == window_size - lookahead - strstart
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ * => more >= window_size - 2*WSIZE + 2
+ * In the BIG_MEM or MMAP case (not yet supported),
+ * window_size == input_size + MIN_LOOKAHEAD &&
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ */
+ Assert(more >= 2, "more < 2");
+
+ n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
+ s->lookahead += n;
+
+ /* Initialize the hash value now that we have some input: */
+ if (s->lookahead >= MIN_MATCH) {
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ }
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ * but this is not important since only literal bytes will be emitted.
+ */
+
+ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+
+ /* If the WIN_INIT bytes after the end of the current data have never been
+ * written, then zero those bytes in order to avoid memory check reports of
+ * the use of uninitialized (or uninitialised as Julian writes) bytes by
+ * the longest match routines. Update the high water mark for the next
+ * time through here. WIN_INIT is set to MAX_MATCH since the longest match
+ * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
+ */
+ if (s->high_water < s->window_size) {
+ ulg curr = s->strstart + (ulg)(s->lookahead);
+ ulg init;
+
+ if (s->high_water < curr) {
+ /* Previous high water mark below current data -- zero WIN_INIT
+ * bytes or up to end of window, whichever is less.
+ */
+ init = s->window_size - curr;
+ if (init > WIN_INIT)
+ init = WIN_INIT;
+ zmemzero(s->window + curr, (unsigned)init);
+ s->high_water = curr + init;
+ }
+ else if (s->high_water < (ulg)curr + WIN_INIT) {
+ /* High water mark at or above current data, but below current data
+ * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
+ * to end of window, whichever is less.
+ */
+ init = (ulg)curr + WIN_INIT - s->high_water;
+ if (init > s->window_size - s->high_water)
+ init = s->window_size - s->high_water;
+ zmemzero(s->window + s->high_water, (unsigned)init);
+ s->high_water += init;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+#define FLUSH_BLOCK_ONLY(s, last) { \
+ _tr_flush_block(s, (s->block_start >= 0L ? \
+ (charf *)&s->window[(unsigned)s->block_start] : \
+ (charf *)Z_NULL), \
+ (ulg)((long)s->strstart - s->block_start), \
+ (last)); \
+ s->block_start = s->strstart; \
+ flush_pending(s->strm); \
+ Tracev((stderr,"[FLUSH]")); \
+}
+
+/* Same but force premature exit if necessary. */
+#define FLUSH_BLOCK(s, last) { \
+ FLUSH_BLOCK_ONLY(s, last); \
+ if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
+}
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ * This function does not insert new strings in the dictionary since
+ * incompressible data is probably not useful. This function is used
+ * only for the level=0 compression option.
+ * NOTE: this function should be optimized to avoid extra copying from
+ * window to pending_buf.
+ */
+local block_state deflate_stored(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
+ * to pending_buf_size, and each stored block has a 5 byte header:
+ */
+ ulg max_block_size = 0xffff;
+ ulg max_start;
+
+ if (max_block_size > s->pending_buf_size - 5) {
+ max_block_size = s->pending_buf_size - 5;
+ }
+
+ /* Copy as much as possible from input to output: */
+ for (;;) {
+ /* Fill the window as much as possible: */
+ if (s->lookahead <= 1) {
+
+ Assert(s->strstart < s->w_size+MAX_DIST(s) ||
+ s->block_start >= (long)s->w_size, "slide too late");
+
+ fill_window(s);
+ if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
+
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+ Assert(s->block_start >= 0L, "block gone");
+
+ s->strstart += s->lookahead;
+ s->lookahead = 0;
+
+ /* Emit a stored block if pending_buf will be full: */
+ max_start = s->block_start + max_block_size;
+ if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
+ /* strstart == 0 is possible after wraparound on a 16-bit machine */
+ s->lookahead = (uInt)(s->strstart - max_start);
+ s->strstart = (uInt)max_start;
+ FLUSH_BLOCK(s, 0);
+ }
+ /* Flush if we may have to slide, otherwise block_start may become
+ * negative and the data will be gone:
+ */
+ if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
+ FLUSH_BLOCK(s, 0);
+ }
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+local block_state deflate_fast(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head; /* head of the hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ hash_head = NIL;
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ * At this point we have always match_length < MIN_MATCH
+ */
+ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ s->match_length = longest_match (s, hash_head);
+ /* longest_match() sets match_start */
+ }
+ if (s->match_length >= MIN_MATCH) {
+ check_match(s, s->strstart, s->match_start, s->match_length);
+
+ _tr_tally_dist(s, s->strstart - s->match_start,
+ s->match_length - MIN_MATCH, bflush);
+
+ s->lookahead -= s->match_length;
+
+ /* Insert new strings in the hash table only if the match length
+ * is not too large. This saves time but degrades compression.
+ */
+#ifndef FASTEST
+ if (s->match_length <= s->max_insert_length &&
+ s->lookahead >= MIN_MATCH) {
+ s->match_length--; /* string at strstart already in table */
+ do {
+ s->strstart++;
+ INSERT_STRING(s, s->strstart, hash_head);
+ /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+ * always MIN_MATCH bytes ahead.
+ */
+ } while (--s->match_length != 0);
+ s->strstart++;
+ } else
+#endif
+ {
+ s->strstart += s->match_length;
+ s->match_length = 0;
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ * matter since it will be recomputed at next deflate call.
+ */
+ }
+ } else {
+ /* No match, output a literal byte */
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ _tr_tally_lit (s, s->window[s->strstart], bflush);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+#ifndef FASTEST
+/* ===========================================================================
+ * Same as above, but achieves better compression. We use a lazy
+ * evaluation for matches: a match is finally adopted only if there is
+ * no better match at the next window position.
+ */
+local block_state deflate_slow(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head; /* head of hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ /* Process the input block. */
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ hash_head = NIL;
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ */
+ s->prev_length = s->match_length, s->prev_match = s->match_start;
+ s->match_length = MIN_MATCH-1;
+
+ if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
+ s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ s->match_length = longest_match (s, hash_head);
+ /* longest_match() sets match_start */
+
+ if (s->match_length <= 5 && (s->strategy == Z_FILTERED
+#if TOO_FAR <= 32767
+ || (s->match_length == MIN_MATCH &&
+ s->strstart - s->match_start > TOO_FAR)
+#endif
+ )) {
+
+ /* If prev_match is also MIN_MATCH, match_start is garbage
+ * but we will ignore the current match anyway.
+ */
+ s->match_length = MIN_MATCH-1;
+ }
+ }
+ /* If there was a match at the previous step and the current
+ * match is not better, output the previous match:
+ */
+ if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
+ uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
+ /* Do not insert strings in hash table beyond this. */
+
+ check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+
+ _tr_tally_dist(s, s->strstart -1 - s->prev_match,
+ s->prev_length - MIN_MATCH, bflush);
+
+ /* Insert in hash table all strings up to the end of the match.
+ * strstart-1 and strstart are already inserted. If there is not
+ * enough lookahead, the last two strings are not inserted in
+ * the hash table.
+ */
+ s->lookahead -= s->prev_length-1;
+ s->prev_length -= 2;
+ do {
+ if (++s->strstart <= max_insert) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+ } while (--s->prev_length != 0);
+ s->match_available = 0;
+ s->match_length = MIN_MATCH-1;
+ s->strstart++;
+
+ if (bflush) FLUSH_BLOCK(s, 0);
+
+ } else if (s->match_available) {
+ /* If there was no match at the previous position, output a
+ * single literal. If there was a match but the current match
+ * is longer, truncate the previous match to a single literal.
+ */
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ if (bflush) {
+ FLUSH_BLOCK_ONLY(s, 0);
+ }
+ s->strstart++;
+ s->lookahead--;
+ if (s->strm->avail_out == 0) return need_more;
+ } else {
+ /* There is no previous match to compare with, wait for
+ * the next step to decide.
+ */
+ s->match_available = 1;
+ s->strstart++;
+ s->lookahead--;
+ }
+ }
+ Assert (flush != Z_NO_FLUSH, "no flush?");
+ if (s->match_available) {
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ s->match_available = 0;
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+#endif /* FASTEST */
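+
+/* A worked example of the lazy heuristic in deflate_slow() (the input
+ * is illustrative): suppose the window already contains "abc" and
+ * "bcde" and the current input is "abcdef". At strstart a 3-byte match
+ * "abc" is found but held back; at strstart+1 a 4-byte match "bcde" is
+ * found. Since the new match is longer, the held match is truncated to
+ * the literal 'a' and the 4-byte match is emitted, which codes more
+ * compactly than "abc" plus literals.
+ */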
+
+/* ===========================================================================
+ * For Z_RLE, simply look for runs of bytes, generate matches only of distance
+ * one. Do not maintain a hash table. (It will be regenerated if this run of
+ * deflate switches away from Z_RLE.)
+ */
+local block_state deflate_rle(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ int bflush; /* set if current block must be flushed */
+ uInt prev; /* byte at distance one to match */
+ Bytef *scan, *strend; /* scan goes up to strend for length of run */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the longest encodable run.
+ */
+ if (s->lookahead < MAX_MATCH) {
+ fill_window(s);
+ if (s->lookahead < MAX_MATCH && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* See how many times the previous byte repeats */
+ s->match_length = 0;
+ if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
+ scan = s->window + s->strstart - 1;
+ prev = *scan;
+ if (prev == *++scan && prev == *++scan && prev == *++scan) {
+ strend = s->window + s->strstart + MAX_MATCH;
+ do {
+ } while (prev == *++scan && prev == *++scan &&
+ prev == *++scan && prev == *++scan &&
+ prev == *++scan && prev == *++scan &&
+ prev == *++scan && prev == *++scan &&
+ scan < strend);
+ s->match_length = MAX_MATCH - (int)(strend - scan);
+ if (s->match_length > s->lookahead)
+ s->match_length = s->lookahead;
+ }
+ }
+
+ /* Emit match if have run of MIN_MATCH or longer, else emit literal */
+ if (s->match_length >= MIN_MATCH) {
+ check_match(s, s->strstart, s->strstart - 1, s->match_length);
+
+ _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);
+
+ s->lookahead -= s->match_length;
+ s->strstart += s->match_length;
+ s->match_length = 0;
+ } else {
+ /* No match, output a literal byte */
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ _tr_tally_lit (s, s->window[s->strstart], bflush);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
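+
+/* Example of the encoding produced above (the input is illustrative):
+ * for "aaaaaaaa" (8 bytes) at the start of a stream, deflate_rle()
+ * emits the literal 'a' followed by a match of distance 1 and length
+ * 7; the decompressor expands it by copying the previous byte
+ * repeatedly.
+ */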
+
+/* ===========================================================================
+ * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
+ * (It will be regenerated if this run of deflate switches away from Huffman.)
+ */
+local block_state deflate_huff(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ int bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we have a literal to write. */
+ if (s->lookahead == 0) {
+ fill_window(s);
+ if (s->lookahead == 0) {
+ if (flush == Z_NO_FLUSH)
+ return need_more;
+ break; /* flush the current block */
+ }
+ }
+
+ /* Output a literal byte */
+ s->match_length = 0;
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ _tr_tally_lit (s, s->window[s->strstart], bflush);
+ s->lookahead--;
+ s->strstart++;
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
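+
+/* A minimal usage sketch (an illustration, not part of the library):
+ * Huffman-only coding can also be requested on an existing stream between
+ * deflate() calls; the surrounding stream setup is assumed.
+ *
+ *   int ret = deflateParams(&strm, 6, Z_HUFFMAN_ONLY);
+ *   // subsequent deflate() calls are handled by deflate_huff() above
+ */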
--- /dev/null
+/* deflate.h -- internal compression state
+ * Copyright (C) 1995-2010 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef DEFLATE_H
+#define DEFLATE_H
+
+#include "zutil.h"
+
+/* define NO_GZIP when compiling if you want to disable gzip header and
+ trailer creation by deflate(). NO_GZIP would be used to avoid linking in
+ the crc code when it is not needed. For shared libraries, gzip encoding
+ should be left enabled. */
+#ifndef NO_GZIP
+# define GZIP
+#endif
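+/* For example (the build command is an assumption, not part of the source):
+ *   cc -c -DNO_GZIP deflate.c
+ */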
+
+/* ===========================================================================
+ * Internal compression state.
+ */
+
+#define LENGTH_CODES 29
+/* number of length codes, not counting the special END_BLOCK code */
+
+#define LITERALS 256
+/* number of literal bytes 0..255 */
+
+#define L_CODES (LITERALS+1+LENGTH_CODES)
+/* number of Literal or Length codes, including the END_BLOCK code */
+
+#define D_CODES 30
+/* number of distance codes */
+
+#define BL_CODES 19
+/* number of codes used to transfer the bit lengths */
+
+#define HEAP_SIZE (2*L_CODES+1)
+/* maximum heap size */
+
+#define MAX_BITS 15
+/* All codes must not exceed MAX_BITS bits */
+
+#define INIT_STATE 42
+#define EXTRA_STATE 69
+#define NAME_STATE 73
+#define COMMENT_STATE 91
+#define HCRC_STATE 103
+#define BUSY_STATE 113
+#define FINISH_STATE 666
+/* Stream status */
+
+
+/* Data structure describing a single value and its code string. */
+typedef struct ct_data_s {
+ union {
+ ush freq; /* frequency count */
+ ush code; /* bit string */
+ } fc;
+ union {
+ ush dad; /* father node in Huffman tree */
+ ush len; /* length of bit string */
+ } dl;
+} FAR ct_data;
+
+#define Freq fc.freq
+#define Code fc.code
+#define Dad dl.dad
+#define Len dl.len
+
+typedef struct static_tree_desc_s static_tree_desc;
+
+typedef struct tree_desc_s {
+ ct_data *dyn_tree; /* the dynamic tree */
+ int max_code; /* largest code with non zero frequency */
+ static_tree_desc *stat_desc; /* the corresponding static tree */
+} FAR tree_desc;
+
+typedef ush Pos;
+typedef Pos FAR Posf;
+typedef unsigned IPos;
+
+/* A Pos is an index in the character window. We use short instead of int to
+ * save space in the various tables. IPos is used only for parameter passing.
+ */
+
+typedef struct internal_state {
+ z_streamp strm; /* pointer back to this zlib stream */
+ int status; /* as the name implies */
+ Bytef *pending_buf; /* output still pending */
+ ulg pending_buf_size; /* size of pending_buf */
+ Bytef *pending_out; /* next pending byte to output to the stream */
+ uInt pending; /* nb of bytes in the pending buffer */
+ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */
+ gz_headerp gzhead; /* gzip header information to write */
+ uInt gzindex; /* where in extra, name, or comment */
+ Byte method; /* STORED (for zip only) or DEFLATED */
+ int last_flush; /* value of flush param for previous deflate call */
+
+ /* used by deflate.c: */
+
+ uInt w_size; /* LZ77 window size (32K by default) */
+ uInt w_bits; /* log2(w_size) (8..16) */
+ uInt w_mask; /* w_size - 1 */
+
+ Bytef *window;
+ /* Sliding window. Input bytes are read into the second half of the window,
+ * and move to the first half later to keep a dictionary of at least wSize
+ * bytes. With this organization, matches are limited to a distance of
+ * wSize-MAX_MATCH bytes, but this ensures that IO is always
+ * performed with a length multiple of the block size. Also, it limits
+ * the window size to 64K, which is quite useful on MSDOS.
+ * To do: use the user input buffer as sliding window.
+ */
+
+ ulg window_size;
+ /* Actual size of window: 2*wSize, except when the user input buffer
+ * is directly used as sliding window.
+ */
+
+ Posf *prev;
+ /* Link to older string with same hash index. To limit the size of this
+ * array to 64K, this link is maintained only for the last 32K strings.
+ * An index in this array is thus a window index modulo 32K.
+ */
+
+ Posf *head; /* Heads of the hash chains or NIL. */
+
+ uInt ins_h; /* hash index of string to be inserted */
+ uInt hash_size; /* number of elements in hash table */
+ uInt hash_bits; /* log2(hash_size) */
+ uInt hash_mask; /* hash_size-1 */
+
+ uInt hash_shift;
+ /* Number of bits by which ins_h must be shifted at each input
+ * step. It must be such that after MIN_MATCH steps, the oldest
+ * byte no longer takes part in the hash key, that is:
+ * hash_shift * MIN_MATCH >= hash_bits
+ */
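+ /* For example, with the default memLevel = 8 (so hash_bits = 15) and
+ * MIN_MATCH = 3, hash_shift = 5 is the smallest value satisfying
+ * 5 * 3 >= 15.
+ */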
+
+ long block_start;
+ /* Window position at the beginning of the current output block. Gets
+ * negative when the window is moved backwards.
+ */
+
+ uInt match_length; /* length of best match */
+ IPos prev_match; /* previous match */
+ int match_available; /* set if previous match exists */
+ uInt strstart; /* start of string to insert */
+ uInt match_start; /* start of matching string */
+ uInt lookahead; /* number of valid bytes ahead in window */
+
+ uInt prev_length;
+ /* Length of the best match at previous step. Matches not greater than this
+ * are discarded. This is used in the lazy match evaluation.
+ */
+
+ uInt max_chain_length;
+ /* To speed up deflation, hash chains are never searched beyond this
+ * length. A higher limit improves compression ratio but degrades the
+ * speed.
+ */
+
+ uInt max_lazy_match;
+ /* Attempt to find a better match only when the current match is strictly
+ * smaller than this value. This mechanism is used only for compression
+ * levels >= 4.
+ */
+# define max_insert_length max_lazy_match
+ /* Insert new strings in the hash table only if the match length is not
+ * greater than this length. This saves time but degrades compression.
+ * max_insert_length is used only for compression levels <= 3.
+ */
+
+ int level; /* compression level (1..9) */
+ int strategy; /* favor or force Huffman coding */
+
+ uInt good_match;
+ /* Use a faster search when the previous match is longer than this */
+
+ int nice_match; /* Stop searching when current match exceeds this */
+
+ /* used by trees.c: */
+ /* Didn't use ct_data typedef below to suppress compiler warning */
+ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
+ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
+ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
+
+ struct tree_desc_s l_desc; /* desc. for literal tree */
+ struct tree_desc_s d_desc; /* desc. for distance tree */
+ struct tree_desc_s bl_desc; /* desc. for bit length tree */
+
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
+ int heap_len; /* number of elements in the heap */
+ int heap_max; /* element of largest frequency */
+ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ * The same heap array is used to build all trees.
+ */
+
+ uch depth[2*L_CODES+1];
+ /* Depth of each subtree used as tie breaker for trees of equal frequency
+ */
+
+ uchf *l_buf; /* buffer for literals or lengths */
+
+ uInt lit_bufsize;
+ /* Size of match buffer for literals/lengths. There are 4 reasons for
+ * limiting lit_bufsize to 64K:
+ * - frequencies can be kept in 16 bit counters
+ * - if compression is not successful for the first block, all input
+ * data is still in the window so we can still emit a stored block even
+ * when input comes from standard input. (This can also be done for
+ * all blocks if lit_bufsize is not greater than 32K.)
+ * - if compression is not successful for a file smaller than 64K, we can
+ * even emit a stored file instead of a stored block (saving 5 bytes).
+ * This is applicable only for zip (not gzip or zlib).
+ * - creating new Huffman trees less frequently may not provide fast
+ * adaptation to changes in the input data statistics. (Take for
+ * example a binary file with poorly compressible code followed by
+ * a highly compressible string table.) Smaller buffer sizes give
+ * fast adaptation but of course have the overhead of transmitting
+ * trees more frequently.
+ * - I can't count above 4
+ */
+
+ uInt last_lit; /* running index in l_buf */
+
+ ushf *d_buf;
+ /* Buffer for distances. To simplify the code, d_buf and l_buf have
+ * the same number of elements. To use different lengths, an extra flag
+ * array would be necessary.
+ */
+
+ ulg opt_len; /* bit length of current block with optimal trees */
+ ulg static_len; /* bit length of current block with static trees */
+ uInt matches; /* number of string matches in current block */
+ int last_eob_len; /* bit length of EOB code for last block */
+
+#ifdef DEBUG
+ ulg compressed_len; /* total bit length of compressed file mod 2^32 */
+ ulg bits_sent; /* bit length of compressed data sent mod 2^32 */
+#endif
+
+ ush bi_buf;
+ /* Output buffer. bits are inserted starting at the bottom (least
+ * significant bits).
+ */
+ int bi_valid;
+ /* Number of valid bits in bi_buf. All bits above the last valid bit
+ * are always zero.
+ */
+
+ ulg high_water;
+ /* High water mark offset in window for initialized bytes -- bytes above
+ * this are set to zero in order to avoid memory check warnings when
+ * longest match routines access bytes past the input. This is then
+ * updated to the new high water mark.
+ */
+
+} FAR deflate_state;
+
+/* Output a byte on the stream.
+ * IN assertion: there is enough room in pending_buf.
+ */
+#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
+
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
+/* In order to simplify the code, particularly on 16 bit machines, match
+ * distances are limited to MAX_DIST instead of WSIZE.
+ */
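+/* For example, with the default w_size = 32768 and MIN_LOOKAHEAD =
+ * 258 + 3 + 1 = 262, MAX_DIST(s) is 32506.
+ */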
+
+#define WIN_INIT MAX_MATCH
+/* Number of bytes after end of data in window to initialize in order to avoid
+ memory checker errors from longest match routines */
+
+ /* in trees.c */
+void ZLIB_INTERNAL _tr_init OF((deflate_state *s));
+int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc));
+void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf,
+ ulg stored_len, int last));
+void ZLIB_INTERNAL _tr_align OF((deflate_state *s));
+void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf,
+ ulg stored_len, int last));
+
+#define d_code(dist) \
+ ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)])
+/* Mapping from a distance to a distance code. dist is the distance - 1 and
+ * must not have side effects. _dist_code[256] and _dist_code[257] are never
+ * used.
+ */
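+/* Worked example (illustrative): a match at distance 300 has dist = 299,
+ * and 299 >= 256, so the code is read from _dist_code[256+(299>>7)], that
+ * is _dist_code[258].
+ */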
+
+#ifndef DEBUG
+/* Inline versions of _tr_tally for speed: */
+
+#if defined(GEN_TREES_H) || !defined(STDC)
+ extern uch ZLIB_INTERNAL _length_code[];
+ extern uch ZLIB_INTERNAL _dist_code[];
+#else
+ extern const uch ZLIB_INTERNAL _length_code[];
+ extern const uch ZLIB_INTERNAL _dist_code[];
+#endif
+
+# define _tr_tally_lit(s, c, flush) \
+ { uch cc = (c); \
+ s->d_buf[s->last_lit] = 0; \
+ s->l_buf[s->last_lit++] = cc; \
+ s->dyn_ltree[cc].Freq++; \
+ flush = (s->last_lit == s->lit_bufsize-1); \
+ }
+# define _tr_tally_dist(s, distance, length, flush) \
+ { uch len = (length); \
+ ush dist = (distance); \
+ s->d_buf[s->last_lit] = dist; \
+ s->l_buf[s->last_lit++] = len; \
+ dist--; \
+ s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \
+ s->dyn_dtree[d_code(dist)].Freq++; \
+ flush = (s->last_lit == s->lit_bufsize-1); \
+ }
+#else
+# define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c)
+# define _tr_tally_dist(s, distance, length, flush) \
+ flush = _tr_tally(s, distance, length)
+#endif
+
+#endif /* DEFLATE_H */
--- /dev/null
+/* infback.c -- inflate using a call-back interface
+ * Copyright (C) 1995-2009 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ This code is largely copied from inflate.c. Normally either infback.o or
+ inflate.o would be linked into an application--not both. The interface
+ with inffast.c is retained so that optimized assembler-coded versions of
+ inflate_fast() can be used with either inflate.c or infback.c.
+ */
+
+#include "zutil.h"
+#include "inftrees.h"
+#include "inflate.h"
+#include "inffast.h"
+
+/* function prototypes */
+local void fixedtables OF((struct inflate_state FAR *state));
+
+/*
+ strm provides memory allocation functions in zalloc and zfree, or
+ Z_NULL to use the library memory allocation functions.
+
+ windowBits is in the range 8..15, and window is a user-supplied
+ window and output buffer that is 2**windowBits bytes.
+ */
+int ZEXPORT inflateBackInit_(strm, windowBits, window, version, stream_size)
+z_streamp strm;
+int windowBits;
+unsigned char FAR *window;
+const char *version;
+int stream_size;
+{
+ struct inflate_state FAR *state;
+
+ if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
+ stream_size != (int)(sizeof(z_stream)))
+ return Z_VERSION_ERROR;
+ if (strm == Z_NULL || window == Z_NULL ||
+ windowBits < 8 || windowBits > 15)
+ return Z_STREAM_ERROR;
+ strm->msg = Z_NULL; /* in case we return an error */
+ if (strm->zalloc == (alloc_func)0) {
+ strm->zalloc = zcalloc;
+ strm->opaque = (voidpf)0;
+ }
+ if (strm->zfree == (free_func)0) strm->zfree = zcfree;
+ state = (struct inflate_state FAR *)ZALLOC(strm, 1,
+ sizeof(struct inflate_state));
+ if (state == Z_NULL) return Z_MEM_ERROR;
+ Tracev((stderr, "inflate: allocated\n"));
+ strm->state = (struct internal_state FAR *)state;
+ state->dmax = 32768U;
+ state->wbits = windowBits;
+ state->wsize = 1U << windowBits;
+ state->window = window;
+ state->wnext = 0;
+ state->whave = 0;
+ return Z_OK;
+}
+
+/*
+ Return state with length and distance decoding tables and index sizes set to
+ fixed code decoding. Normally this returns fixed tables from inffixed.h.
+ If BUILDFIXED is defined, then instead this routine builds the tables the
+ first time it's called, and returns those tables the first time and
+ thereafter. This reduces the size of the code by about 2K bytes, in
+ exchange for a little execution time. However, BUILDFIXED should not be
+ used for threaded applications, since the rewriting of the tables and the
+ static virgin flag may not be thread-safe.
+ */
+local void fixedtables(state)
+struct inflate_state FAR *state;
+{
+#ifdef BUILDFIXED
+ static int virgin = 1;
+ static code *lenfix, *distfix;
+ static code fixed[544];
+
+ /* build fixed huffman tables if first call (may not be thread safe) */
+ if (virgin) {
+ unsigned sym, bits;
+ static code *next;
+
+ /* literal/length table */
+ sym = 0;
+ while (sym < 144) state->lens[sym++] = 8;
+ while (sym < 256) state->lens[sym++] = 9;
+ while (sym < 280) state->lens[sym++] = 7;
+ while (sym < 288) state->lens[sym++] = 8;
+ next = fixed;
+ lenfix = next;
+ bits = 9;
+ inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work);
+
+ /* distance table */
+ sym = 0;
+ while (sym < 32) state->lens[sym++] = 5;
+ distfix = next;
+ bits = 5;
+ inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work);
+
+ /* do this just once */
+ virgin = 0;
+ }
+#else /* !BUILDFIXED */
+# include "inffixed.h"
+#endif /* BUILDFIXED */
+ state->lencode = lenfix;
+ state->lenbits = 9;
+ state->distcode = distfix;
+ state->distbits = 5;
+}
+
+/* Macros for inflateBack(): */
+
+/* Load returned state from inflate_fast() */
+#define LOAD() \
+ do { \
+ put = strm->next_out; \
+ left = strm->avail_out; \
+ next = strm->next_in; \
+ have = strm->avail_in; \
+ hold = state->hold; \
+ bits = state->bits; \
+ } while (0)
+
+/* Set state from registers for inflate_fast() */
+#define RESTORE() \
+ do { \
+ strm->next_out = put; \
+ strm->avail_out = left; \
+ strm->next_in = next; \
+ strm->avail_in = have; \
+ state->hold = hold; \
+ state->bits = bits; \
+ } while (0)
+
+/* Clear the input bit accumulator */
+#define INITBITS() \
+ do { \
+ hold = 0; \
+ bits = 0; \
+ } while (0)
+
+/* Assure that some input is available. If input is requested but denied,
+ then return a Z_BUF_ERROR from inflateBack(). */
+#define PULL() \
+ do { \
+ if (have == 0) { \
+ have = in(in_desc, &next); \
+ if (have == 0) { \
+ next = Z_NULL; \
+ ret = Z_BUF_ERROR; \
+ goto inf_leave; \
+ } \
+ } \
+ } while (0)
+
+/* Get a byte of input into the bit accumulator, or return from inflateBack()
+ with an error if there is no input available. */
+#define PULLBYTE() \
+ do { \
+ PULL(); \
+ have--; \
+ hold += (unsigned long)(*next++) << bits; \
+ bits += 8; \
+ } while (0)
+
+/* Assure that there are at least n bits in the bit accumulator. If there is
+ not enough available input to do that, then return from inflateBack() with
+ an error. */
+#define NEEDBITS(n) \
+ do { \
+ while (bits < (unsigned)(n)) \
+ PULLBYTE(); \
+ } while (0)
+
+/* Return the low n bits of the bit accumulator (n < 16) */
+#define BITS(n) \
+ ((unsigned)hold & ((1U << (n)) - 1))
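+/* For example, if hold == 0x1e5 (binary 111100101), then BITS(3) is 5 and
+   BITS(6) is 37. */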
+
+/* Remove n bits from the bit accumulator */
+#define DROPBITS(n) \
+ do { \
+ hold >>= (n); \
+ bits -= (unsigned)(n); \
+ } while (0)
+
+/* Remove zero to seven bits as needed to go to a byte boundary */
+#define BYTEBITS() \
+ do { \
+ hold >>= bits & 7; \
+ bits -= bits & 7; \
+ } while (0)
+
+/* Assure that some output space is available, by writing out the window
+ if it's full. If the write fails, return from inflateBack() with a
+ Z_BUF_ERROR. */
+#define ROOM() \
+ do { \
+ if (left == 0) { \
+ put = state->window; \
+ left = state->wsize; \
+ state->whave = left; \
+ if (out(out_desc, put, left)) { \
+ ret = Z_BUF_ERROR; \
+ goto inf_leave; \
+ } \
+ } \
+ } while (0)
+
+/*
+ strm provides the memory allocation functions and window buffer on input,
+ and provides information on the unused input on return. For Z_DATA_ERROR
+ returns, strm will also provide an error message.
+
+ in() and out() are the call-back input and output functions. When
+ inflateBack() needs more input, it calls in(). When inflateBack() has
+ filled the window with output, or when it completes with data in the
+ window, it calls out() to write out the data. The application must not
+ change the provided input until in() is called again or inflateBack()
+ returns. The application must not change the window/output buffer until
+ inflateBack() returns.
+
+ in() and out() are called with a descriptor parameter provided in the
+ inflateBack() call. This parameter can be a structure that provides the
+ information required to do the read or write, as well as accumulated
+ information on the input and output such as totals and check values.
+
+ in() should return zero on failure. out() should return non-zero on
+ failure. If either in() or out() fails, then inflateBack() returns a
+ Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it
+ was in() or out() that caused the error. Otherwise, inflateBack()
+ returns Z_STREAM_END on success, Z_DATA_ERROR for a deflate format
+ error, or Z_MEM_ERROR if it could not allocate memory for the state.
+ inflateBack() can also return Z_STREAM_ERROR if the input parameters
+ are not correct, i.e. strm is Z_NULL or the state was not initialized.
+ */
+int ZEXPORT inflateBack(strm, in, in_desc, out, out_desc)
+z_streamp strm;
+in_func in;
+void FAR *in_desc;
+out_func out;
+void FAR *out_desc;
+{
+ struct inflate_state FAR *state;
+ unsigned char FAR *next; /* next input */
+ unsigned char FAR *put; /* next output */
+ unsigned have, left; /* available input and output */
+ unsigned long hold; /* bit buffer */
+ unsigned bits; /* bits in bit buffer */
+ unsigned copy; /* number of stored or match bytes to copy */
+ unsigned char FAR *from; /* where to copy match bytes from */
+ code here; /* current decoding table entry */
+ code last; /* parent table entry */
+ unsigned len; /* length to copy for repeats, bits to drop */
+ int ret; /* return code */
+ static const unsigned short order[19] = /* permutation of code lengths */
+ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+ /* Check that the strm exists and that the state was initialized */
+ if (strm == Z_NULL || strm->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+
+ /* Reset the state */
+ strm->msg = Z_NULL;
+ state->mode = TYPE;
+ state->last = 0;
+ state->whave = 0;
+ next = strm->next_in;
+ have = next != Z_NULL ? strm->avail_in : 0;
+ hold = 0;
+ bits = 0;
+ put = state->window;
+ left = state->wsize;
+
+ /* Inflate until end of block marked as last */
+ for (;;)
+ switch (state->mode) {
+ case TYPE:
+ /* determine and dispatch block type */
+ if (state->last) {
+ BYTEBITS();
+ state->mode = DONE;
+ break;
+ }
+ NEEDBITS(3);
+ state->last = BITS(1);
+ DROPBITS(1);
+ switch (BITS(2)) {
+ case 0: /* stored block */
+ Tracev((stderr, "inflate: stored block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = STORED;
+ break;
+ case 1: /* fixed block */
+ fixedtables(state);
+ Tracev((stderr, "inflate: fixed codes block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = LEN; /* decode codes */
+ break;
+ case 2: /* dynamic block */
+ Tracev((stderr, "inflate: dynamic codes block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = TABLE;
+ break;
+ case 3:
+ strm->msg = (char *)"invalid block type";
+ state->mode = BAD;
+ }
+ DROPBITS(2);
+ break;
+
+ case STORED:
+ /* get and verify stored block length */
+ BYTEBITS(); /* go to byte boundary */
+ NEEDBITS(32);
+ if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
+ strm->msg = (char *)"invalid stored block lengths";
+ state->mode = BAD;
+ break;
+ }
+ state->length = (unsigned)hold & 0xffff;
+ Tracev((stderr, "inflate: stored length %u\n",
+ state->length));
+ INITBITS();
+
+ /* copy stored block from input to output */
+ while (state->length != 0) {
+ copy = state->length;
+ PULL();
+ ROOM();
+ if (copy > have) copy = have;
+ if (copy > left) copy = left;
+ zmemcpy(put, next, copy);
+ have -= copy;
+ next += copy;
+ left -= copy;
+ put += copy;
+ state->length -= copy;
+ }
+ Tracev((stderr, "inflate: stored end\n"));
+ state->mode = TYPE;
+ break;
+
+ case TABLE:
+ /* get dynamic table entries descriptor */
+ NEEDBITS(14);
+ state->nlen = BITS(5) + 257;
+ DROPBITS(5);
+ state->ndist = BITS(5) + 1;
+ DROPBITS(5);
+ state->ncode = BITS(4) + 4;
+ DROPBITS(4);
+#ifndef PKZIP_BUG_WORKAROUND
+ if (state->nlen > 286 || state->ndist > 30) {
+ strm->msg = (char *)"too many length or distance symbols";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ Tracev((stderr, "inflate: table sizes ok\n"));
+
+ /* get code length code lengths (not a typo) */
+ state->have = 0;
+ while (state->have < state->ncode) {
+ NEEDBITS(3);
+ state->lens[order[state->have++]] = (unsigned short)BITS(3);
+ DROPBITS(3);
+ }
+ while (state->have < 19)
+ state->lens[order[state->have++]] = 0;
+ state->next = state->codes;
+ state->lencode = (code const FAR *)(state->next);
+ state->lenbits = 7;
+ ret = inflate_table(CODES, state->lens, 19, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid code lengths set";
+ state->mode = BAD;
+ break;
+ }
+ Tracev((stderr, "inflate: code lengths ok\n"));
+
+ /* get length and distance code code lengths */
+ state->have = 0;
+ while (state->have < state->nlen + state->ndist) {
+ for (;;) {
+ here = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (here.val < 16) {
+ NEEDBITS(here.bits);
+ DROPBITS(here.bits);
+ state->lens[state->have++] = here.val;
+ }
+ else {
+ if (here.val == 16) {
+ NEEDBITS(here.bits + 2);
+ DROPBITS(here.bits);
+ if (state->have == 0) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ len = (unsigned)(state->lens[state->have - 1]);
+ copy = 3 + BITS(2);
+ DROPBITS(2);
+ }
+ else if (here.val == 17) {
+ NEEDBITS(here.bits + 3);
+ DROPBITS(here.bits);
+ len = 0;
+ copy = 3 + BITS(3);
+ DROPBITS(3);
+ }
+ else {
+ NEEDBITS(here.bits + 7);
+ DROPBITS(here.bits);
+ len = 0;
+ copy = 11 + BITS(7);
+ DROPBITS(7);
+ }
+ if (state->have + copy > state->nlen + state->ndist) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ while (copy--)
+ state->lens[state->have++] = (unsigned short)len;
+ }
+ }
+
+ /* handle error breaks in while */
+ if (state->mode == BAD) break;
+
+ /* check for end-of-block code (better have one) */
+ if (state->lens[256] == 0) {
+ strm->msg = (char *)"invalid code -- missing end-of-block";
+ state->mode = BAD;
+ break;
+ }
+
+ /* build code tables -- note: do not change the lenbits or distbits
+ values here (9 and 6) without reading the comments in inftrees.h
+ concerning the ENOUGH constants, which depend on those values */
+ state->next = state->codes;
+ state->lencode = (code const FAR *)(state->next);
+ state->lenbits = 9;
+ ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid literal/lengths set";
+ state->mode = BAD;
+ break;
+ }
+ state->distcode = (code const FAR *)(state->next);
+ state->distbits = 6;
+ ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
+ &(state->next), &(state->distbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid distances set";
+ state->mode = BAD;
+ break;
+ }
+ Tracev((stderr, "inflate: codes ok\n"));
+ state->mode = LEN;
+
+ case LEN:
+ /* use inflate_fast() if we have enough input and output */
+ if (have >= 6 && left >= 258) {
+ RESTORE();
+ if (state->whave < state->wsize)
+ state->whave = state->wsize - left;
+ inflate_fast(strm, state->wsize);
+ LOAD();
+ break;
+ }
+
+ /* get a literal, length, or end-of-block code */
+ for (;;) {
+ here = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (here.op && (here.op & 0xf0) == 0) {
+ last = here;
+ for (;;) {
+ here = state->lencode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ }
+ DROPBITS(here.bits);
+ state->length = (unsigned)here.val;
+
+ /* process literal */
+ if (here.op == 0) {
+ Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", here.val));
+ ROOM();
+ *put++ = (unsigned char)(state->length);
+ left--;
+ state->mode = LEN;
+ break;
+ }
+
+ /* process end of block */
+ if (here.op & 32) {
+ Tracevv((stderr, "inflate: end of block\n"));
+ state->mode = TYPE;
+ break;
+ }
+
+ /* invalid code */
+ if (here.op & 64) {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+
+ /* length code -- get extra bits, if any */
+ state->extra = (unsigned)(here.op) & 15;
+ if (state->extra != 0) {
+ NEEDBITS(state->extra);
+ state->length += BITS(state->extra);
+ DROPBITS(state->extra);
+ }
+ Tracevv((stderr, "inflate: length %u\n", state->length));
+
+ /* get distance code */
+ for (;;) {
+ here = state->distcode[BITS(state->distbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if ((here.op & 0xf0) == 0) {
+ last = here;
+ for (;;) {
+ here = state->distcode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ }
+ DROPBITS(here.bits);
+ if (here.op & 64) {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ state->offset = (unsigned)here.val;
+
+ /* get distance extra bits, if any */
+ state->extra = (unsigned)(here.op) & 15;
+ if (state->extra != 0) {
+ NEEDBITS(state->extra);
+ state->offset += BITS(state->extra);
+ DROPBITS(state->extra);
+ }
+ if (state->offset > state->wsize - (state->whave < state->wsize ?
+ left : 0)) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+ Tracevv((stderr, "inflate: distance %u\n", state->offset));
+
+ /* copy match from window to output */
+ do {
+ ROOM();
+ copy = state->wsize - state->offset;
+ if (copy < left) {
+ from = put + copy;
+ copy = left - copy;
+ }
+ else {
+ from = put - state->offset;
+ copy = left;
+ }
+ if (copy > state->length) copy = state->length;
+ state->length -= copy;
+ left -= copy;
+ do {
+ *put++ = *from++;
+ } while (--copy);
+ } while (state->length != 0);
+ break;
+
+ case DONE:
+ /* inflate stream terminated properly -- write leftover output */
+ ret = Z_STREAM_END;
+ if (left < state->wsize) {
+ if (out(out_desc, state->window, state->wsize - left))
+ ret = Z_BUF_ERROR;
+ }
+ goto inf_leave;
+
+ case BAD:
+ ret = Z_DATA_ERROR;
+ goto inf_leave;
+
+ default: /* can't happen, but makes compilers happy */
+ ret = Z_STREAM_ERROR;
+ goto inf_leave;
+ }
+
+ /* Return unused input */
+ inf_leave:
+ strm->next_in = next;
+ strm->avail_in = have;
+ return ret;
+}
+
+int ZEXPORT inflateBackEnd(strm)
+z_streamp strm;
+{
+ if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0)
+ return Z_STREAM_ERROR;
+ ZFREE(strm, strm->state);
+ strm->state = Z_NULL;
+ Tracev((stderr, "inflate: end\n"));
+ return Z_OK;
+}
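+
+/* A minimal usage sketch of the call-back interface (an illustration under
+   assumed names, not part of the library): decompress a raw deflate stream
+   from stdin to stdout. <stdio.h> is required, error handling is
+   abbreviated, and the 16384-byte input chunk size is an arbitrary
+   assumption.
+
+   static unsigned char hold[16384];
+
+   static unsigned in(void *desc, unsigned char **buf)
+   {
+       *buf = hold;
+       return (unsigned)fread(hold, 1, sizeof(hold), (FILE *)desc);
+   }
+
+   static int out(void *desc, unsigned char *buf, unsigned len)
+   {
+       return fwrite(buf, 1, len, (FILE *)desc) != len;
+   }
+
+   int main(void)
+   {
+       int ret;
+       z_stream strm;
+       static unsigned char window[32768];    // 1 << windowBits for 15
+
+       strm.zalloc = Z_NULL;
+       strm.zfree = Z_NULL;
+       strm.opaque = Z_NULL;
+       strm.next_in = Z_NULL;                 // no initial input provided
+       strm.avail_in = 0;
+       ret = inflateBackInit(&strm, 15, window);
+       if (ret == Z_OK)
+           ret = inflateBack(&strm, in, stdin, out, stdout);
+       inflateBackEnd(&strm);
+       return ret == Z_STREAM_END ? 0 : 1;
+   }
+ */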
--- /dev/null
+/* inffast.c -- fast decoding
+ * Copyright (C) 1995-2008, 2010 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zutil.h"
+#include "inftrees.h"
+#include "inflate.h"
+#include "inffast.h"
+
+#ifndef ASMINF
+
+/* Allow machine dependent optimization for post-increment or pre-increment.
+ Based on testing to date,
+ Pre-increment preferred for:
+ - PowerPC G3 (Adler)
+ - MIPS R5000 (Randers-Pehrson)
+ Post-increment preferred for:
+ - none
+ No measurable difference:
+ - Pentium III (Anderson)
+ - M68060 (Nikl)
+ */
+#ifdef POSTINC
+# define OFF 0
+# define PUP(a) *(a)++
+#else
+# define OFF 1
+# define PUP(a) *++(a)
+#endif
+
+/*
+ Decode literal, length, and distance codes and write out the resulting
+ literal and match bytes until either not enough input or output is
+ available, an end-of-block is encountered, or a data error is encountered.
+ When large enough input and output buffers are supplied to inflate(), for
+ example, a 16K input buffer and a 64K output buffer, more than 95% of the
+ inflate execution time is spent in this routine.
+
+ Entry assumptions:
+
+ state->mode == LEN
+ strm->avail_in >= 6
+ strm->avail_out >= 258
+ start >= strm->avail_out
+ state->bits < 8
+
+ On return, state->mode is one of:
+
+ LEN -- ran out of output space or available input
+ TYPE -- reached end of block code, inflate() to interpret next block
+ BAD -- error in block data
+
+ Notes:
+
+ - The maximum input bits used by a length/distance pair is 15 bits for the
+ length code, 5 bits for the length extra, 15 bits for the distance code,
+ and 13 bits for the distance extra. This totals 48 bits, or six bytes.
+ Therefore if strm->avail_in >= 6, then there is enough input to avoid
+ checking for available input while decoding.
+
+ - The maximum bytes that a single length/distance pair can output is 258
+ bytes, which is the maximum length that can be coded. inflate_fast()
+ requires strm->avail_out >= 258 for each loop to avoid checking for
+ output space.
+ */
+void ZLIB_INTERNAL inflate_fast(strm, start)
+z_streamp strm;
+unsigned start; /* inflate()'s starting value for strm->avail_out */
+{
+ struct inflate_state FAR *state;
+ unsigned char FAR *in; /* local strm->next_in */
+ unsigned char FAR *last; /* while in < last, enough input available */
+ unsigned char FAR *out; /* local strm->next_out */
+ unsigned char FAR *beg; /* inflate()'s initial strm->next_out */
+ unsigned char FAR *end; /* while out < end, enough space available */
+#ifdef INFLATE_STRICT
+ unsigned dmax; /* maximum distance from zlib header */
+#endif
+ unsigned wsize; /* window size or zero if not using window */
+ unsigned whave; /* valid bytes in the window */
+ unsigned wnext; /* window write index */
+ unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */
+ unsigned long hold; /* local strm->hold */
+ unsigned bits; /* local strm->bits */
+ code const FAR *lcode; /* local strm->lencode */
+ code const FAR *dcode; /* local strm->distcode */
+ unsigned lmask; /* mask for first level of length codes */
+ unsigned dmask; /* mask for first level of distance codes */
+ code here; /* retrieved table entry */
+ unsigned op; /* code bits, operation, extra bits, or */
+ /* window position, window bytes to copy */
+ unsigned len; /* match length, unused bytes */
+ unsigned dist; /* match distance */
+ unsigned char FAR *from; /* where to copy match from */
+
+ /* copy state to local variables */
+ state = (struct inflate_state FAR *)strm->state;
+ in = strm->next_in - OFF;
+ last = in + (strm->avail_in - 5);
+ out = strm->next_out - OFF;
+ beg = out - (start - strm->avail_out);
+ end = out + (strm->avail_out - 257);
+#ifdef INFLATE_STRICT
+ dmax = state->dmax;
+#endif
+ wsize = state->wsize;
+ whave = state->whave;
+ wnext = state->wnext;
+ window = state->window;
+ hold = state->hold;
+ bits = state->bits;
+ lcode = state->lencode;
+ dcode = state->distcode;
+ lmask = (1U << state->lenbits) - 1;
+ dmask = (1U << state->distbits) - 1;
+
+ /* decode literals and length/distances until end-of-block or not enough
+ input data or output space */
+ do {
+ if (bits < 15) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ }
+ here = lcode[hold & lmask];
+ dolen:
+ op = (unsigned)(here.bits);
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(here.op);
+ if (op == 0) { /* literal */
+ Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", here.val));
+ PUP(out) = (unsigned char)(here.val);
+ }
+ else if (op & 16) { /* length base */
+ len = (unsigned)(here.val);
+ op &= 15; /* number of extra bits */
+ if (op) {
+ if (bits < op) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ }
+ len += (unsigned)hold & ((1U << op) - 1);
+ hold >>= op;
+ bits -= op;
+ }
+ Tracevv((stderr, "inflate: length %u\n", len));
+ if (bits < 15) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ }
+ here = dcode[hold & dmask];
+ dodist:
+ op = (unsigned)(here.bits);
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(here.op);
+ if (op & 16) { /* distance base */
+ dist = (unsigned)(here.val);
+ op &= 15; /* number of extra bits */
+ if (bits < op) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ if (bits < op) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ }
+ }
+ dist += (unsigned)hold & ((1U << op) - 1);
+#ifdef INFLATE_STRICT
+ if (dist > dmax) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ hold >>= op;
+ bits -= op;
+ Tracevv((stderr, "inflate: distance %u\n", dist));
+ op = (unsigned)(out - beg); /* max distance in output */
+ if (dist > op) { /* see if copy from window */
+ op = dist - op; /* distance back in window */
+ if (op > whave) {
+ if (state->sane) {
+ strm->msg =
+ (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
+ if (len <= op - whave) {
+ do {
+ PUP(out) = 0;
+ } while (--len);
+ continue;
+ }
+ len -= op - whave;
+ do {
+ PUP(out) = 0;
+ } while (--op > whave);
+ if (op == 0) {
+ from = out - dist;
+ do {
+ PUP(out) = PUP(from);
+ } while (--len);
+ continue;
+ }
+#endif
+ }
+ from = window - OFF;
+ if (wnext == 0) { /* very common case */
+ from += wsize - op;
+ if (op < len) { /* some from window */
+ len -= op;
+ do {
+ PUP(out) = PUP(from);
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ else if (wnext < op) { /* wrap around window */
+ from += wsize + wnext - op;
+ op -= wnext;
+ if (op < len) { /* some from end of window */
+ len -= op;
+ do {
+ PUP(out) = PUP(from);
+ } while (--op);
+ from = window - OFF;
+ if (wnext < len) { /* some from start of window */
+ op = wnext;
+ len -= op;
+ do {
+ PUP(out) = PUP(from);
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ }
+ else { /* contiguous in window */
+ from += wnext - op;
+ if (op < len) { /* some from window */
+ len -= op;
+ do {
+ PUP(out) = PUP(from);
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ while (len > 2) {
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ len -= 3;
+ }
+ if (len) {
+ PUP(out) = PUP(from);
+ if (len > 1)
+ PUP(out) = PUP(from);
+ }
+ }
+ else {
+ from = out - dist; /* copy direct from output */
+ do { /* minimum length is three */
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ len -= 3;
+ } while (len > 2);
+ if (len) {
+ PUP(out) = PUP(from);
+ if (len > 1)
+ PUP(out) = PUP(from);
+ }
+ }
+ }
+ else if ((op & 64) == 0) { /* 2nd level distance code */
+ here = dcode[here.val + (hold & ((1U << op) - 1))];
+ goto dodist;
+ }
+ else {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ }
+ else if ((op & 64) == 0) { /* 2nd level length code */
+ here = lcode[here.val + (hold & ((1U << op) - 1))];
+ goto dolen;
+ }
+ else if (op & 32) { /* end-of-block */
+ Tracevv((stderr, "inflate: end of block\n"));
+ state->mode = TYPE;
+ break;
+ }
+ else {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+ } while (in < last && out < end);
+
+ /* return unused bytes (on entry, bits < 8, so in won't go too far back) */
+ len = bits >> 3;
+ in -= len;
+ bits -= len << 3;
+ hold &= (1U << bits) - 1;
+
+ /* update state and return */
+ strm->next_in = in + OFF;
+ strm->next_out = out + OFF;
+ strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
+ strm->avail_out = (unsigned)(out < end ?
+ 257 + (end - out) : 257 - (out - end));
+ state->hold = hold;
+ state->bits = bits;
+ return;
+}
+
+/*
+ inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe):
+ - Using bit fields for code structure
+ - Different op definition to avoid & for extra bits (do & for table bits)
+ - Three separate decoding do-loops for direct, window, and wnext == 0
+ - Special case for distance > 1 copies to do overlapped load and store copy
+ - Explicit branch predictions (based on measured branch probabilities)
+ * - Deferring match copy and interspersing it with decoding subsequent codes
+ - Swapping literal/length else
+ - Swapping window/direct else
+ - Larger unrolled copy loops (three is about right)
+ - Moving len -= 3 statement into middle of loop
+ */
+
+#endif /* !ASMINF */
--- /dev/null
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995-2003, 2010 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start));
--- /dev/null
+ /* inffixed.h -- table for decoding fixed codes
+ * Generated automatically by makefixed().
+ */
+
+ /* WARNING: this file should *not* be used by applications. It
+ is part of the implementation of the compression library and
+ is subject to change. Applications should only use zlib.h.
+ */
+
+ static const code lenfix[512] = {
+ {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48},
+ {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128},
+ {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59},
+ {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176},
+ {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20},
+ {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100},
+ {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8},
+ {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216},
+ {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76},
+ {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114},
+ {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2},
+ {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148},
+ {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42},
+ {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86},
+ {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15},
+ {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236},
+ {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62},
+ {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142},
+ {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31},
+ {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162},
+ {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25},
+ {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105},
+ {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4},
+ {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202},
+ {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69},
+ {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125},
+ {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13},
+ {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195},
+ {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35},
+ {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91},
+ {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19},
+ {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246},
+ {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55},
+ {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135},
+ {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99},
+ {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190},
+ {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16},
+ {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96},
+ {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6},
+ {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209},
+ {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72},
+ {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116},
+ {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4},
+ {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153},
+ {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44},
+ {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82},
+ {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11},
+ {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229},
+ {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58},
+ {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138},
+ {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51},
+ {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173},
+ {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30},
+ {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110},
+ {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0},
+ {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195},
+ {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65},
+ {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121},
+ {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9},
+ {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258},
+ {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37},
+ {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93},
+ {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23},
+ {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251},
+ {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51},
+ {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131},
+ {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67},
+ {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183},
+ {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23},
+ {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103},
+ {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9},
+ {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223},
+ {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79},
+ {0,9,255}
+ };
+
+ static const code distfix[32] = {
+ {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025},
+ {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193},
+ {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385},
+ {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577},
+ {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073},
+ {22,5,193},{64,5,0}
+ };
--- /dev/null
+/* inflate.c -- zlib decompression
+ * Copyright (C) 1995-2010 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * Change history:
+ *
+ * 1.2.beta0 24 Nov 2002
+ * - First version -- complete rewrite of inflate to simplify code, avoid
+ * creation of window when not needed, minimize use of window when it is
+ * needed, make inffast.c even faster, implement gzip decoding, and to
+ * improve code readability and style over the previous zlib inflate code
+ *
+ * 1.2.beta1 25 Nov 2002
+ * - Use pointers for available input and output checking in inffast.c
+ * - Remove input and output counters in inffast.c
+ * - Change inffast.c entry and loop from avail_in >= 7 to >= 6
+ * - Remove unnecessary second byte pull from length extra in inffast.c
+ * - Unroll direct copy to three copies per loop in inffast.c
+ *
+ * 1.2.beta2 4 Dec 2002
+ * - Change external routine names to reduce potential conflicts
+ * - Correct filename to inffixed.h for fixed tables in inflate.c
+ * - Make hbuf[] unsigned char to match parameter type in inflate.c
+ * - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset)
+ * to avoid negation problem on Alphas (64 bit) in inflate.c
+ *
+ * 1.2.beta3 22 Dec 2002
+ * - Add comments on state->bits assertion in inffast.c
+ * - Add comments on op field in inftrees.h
+ * - Fix bug in reuse of allocated window after inflateReset()
+ * - Remove bit fields--back to byte structure for speed
+ * - Remove distance extra == 0 check in inflate_fast()--only helps for lengths
+ * - Change post-increments to pre-increments in inflate_fast(), PPC biased?
+ * - Add compile time option, POSTINC, to use post-increments instead (Intel?)
+ * - Make MATCH copy in inflate() much faster for when inflate_fast() not used
+ * - Use local copies of stream next and avail values, as well as local bit
+ * buffer and bit count in inflate()--for speed when inflate_fast() not used
+ *
+ * 1.2.beta4 1 Jan 2003
+ * - Split ptr - 257 statements in inflate_table() to avoid compiler warnings
+ * - Move a comment on output buffer sizes from inffast.c to inflate.c
+ * - Add comments in inffast.c to introduce the inflate_fast() routine
+ * - Rearrange window copies in inflate_fast() for speed and simplification
+ * - Unroll last copy for window match in inflate_fast()
+ * - Use local copies of window variables in inflate_fast() for speed
+ * - Pull out common wnext == 0 case for speed in inflate_fast()
+ * - Make op and len in inflate_fast() unsigned for consistency
+ * - Add FAR to lcode and dcode declarations in inflate_fast()
+ * - Simplified bad distance check in inflate_fast()
+ * - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new
+ * source file infback.c to provide a call-back interface to inflate for
+ * programs like gzip and unzip -- uses window as output buffer to avoid
+ * window copying
+ *
+ * 1.2.beta5 1 Jan 2003
+ * - Improved inflateBack() interface to allow the caller to provide initial
+ * input in strm.
+ * - Fixed stored blocks bug in inflateBack()
+ *
+ * 1.2.beta6 4 Jan 2003
+ * - Added comments in inffast.c on effectiveness of POSTINC
+ * - Typecasting all around to reduce compiler warnings
+ * - Changed loops from while (1) or do {} while (1) to for (;;), again to
+ * make compilers happy
+ * - Changed type of window in inflateBackInit() to unsigned char *
+ *
+ * 1.2.beta7 27 Jan 2003
+ * - Changed many types to unsigned or unsigned short to avoid warnings
+ * - Added inflateCopy() function
+ *
+ * 1.2.0 9 Mar 2003
+ * - Changed inflateBack() interface to provide separate opaque descriptors
+ * for the in() and out() functions
+ * - Changed inflateBack() argument and in_func typedef to swap the length
+ * and buffer address return values for the input function
+ * - Check next_in and next_out for Z_NULL on entry to inflate()
+ *
+ * The history for versions after 1.2.0 are in ChangeLog in zlib distribution.
+ */
+
+#include "zutil.h"
+#include "inftrees.h"
+#include "inflate.h"
+#include "inffast.h"
+
+#ifdef MAKEFIXED
+# ifndef BUILDFIXED
+# define BUILDFIXED
+# endif
+#endif
+
+/* function prototypes */
+local void fixedtables OF((struct inflate_state FAR *state));
+local int updatewindow OF((z_streamp strm, unsigned out));
+#ifdef BUILDFIXED
+ void makefixed OF((void));
+#endif
+local unsigned syncsearch OF((unsigned FAR *have, unsigned char FAR *buf,
+ unsigned len));
+
+int ZEXPORT inflateReset(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ strm->total_in = strm->total_out = state->total = 0;
+ strm->msg = Z_NULL;
+ strm->adler = 1; /* to support ill-conceived Java test suite */
+ state->mode = HEAD;
+ state->last = 0;
+ state->havedict = 0;
+ state->dmax = 32768U;
+ state->head = Z_NULL;
+ state->wsize = 0;
+ state->whave = 0;
+ state->wnext = 0;
+ state->hold = 0;
+ state->bits = 0;
+ state->lencode = state->distcode = state->next = state->codes;
+ state->sane = 1;
+ state->back = -1;
+ Tracev((stderr, "inflate: reset\n"));
+ return Z_OK;
+}
+
+int ZEXPORT inflateReset2(strm, windowBits)
+z_streamp strm;
+int windowBits;
+{
+ int wrap;
+ struct inflate_state FAR *state;
+
+ /* get the state */
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+
+ /* extract wrap request from windowBits parameter */
+ if (windowBits < 0) {
+ wrap = 0;
+ windowBits = -windowBits;
+ }
+ else {
+ wrap = (windowBits >> 4) + 1;
+#ifdef GUNZIP
+ if (windowBits < 48)
+ windowBits &= 15;
+#endif
+ }
+
+ /* set number of window bits, free window if different */
+ if (windowBits && (windowBits < 8 || windowBits > 15))
+ return Z_STREAM_ERROR;
+ if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) {
+ ZFREE(strm, state->window);
+ state->window = Z_NULL;
+ }
+
+ /* update state and reset the rest of it */
+ state->wrap = wrap;
+ state->wbits = (unsigned)windowBits;
+ return inflateReset(strm);
+}
+
+int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size)
+z_streamp strm;
+int windowBits;
+const char *version;
+int stream_size;
+{
+ int ret;
+ struct inflate_state FAR *state;
+
+ if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
+ stream_size != (int)(sizeof(z_stream)))
+ return Z_VERSION_ERROR;
+ if (strm == Z_NULL) return Z_STREAM_ERROR;
+ strm->msg = Z_NULL; /* in case we return an error */
+ if (strm->zalloc == (alloc_func)0) {
+ strm->zalloc = zcalloc;
+ strm->opaque = (voidpf)0;
+ }
+ if (strm->zfree == (free_func)0) strm->zfree = zcfree;
+ state = (struct inflate_state FAR *)
+ ZALLOC(strm, 1, sizeof(struct inflate_state));
+ if (state == Z_NULL) return Z_MEM_ERROR;
+ Tracev((stderr, "inflate: allocated\n"));
+ strm->state = (struct internal_state FAR *)state;
+ state->window = Z_NULL;
+ ret = inflateReset2(strm, windowBits);
+ if (ret != Z_OK) {
+ ZFREE(strm, state);
+ strm->state = Z_NULL;
+ }
+ return ret;
+}
+
+int ZEXPORT inflateInit_(strm, version, stream_size)
+z_streamp strm;
+const char *version;
+int stream_size;
+{
+ return inflateInit2_(strm, DEF_WBITS, version, stream_size);
+}
+
+int ZEXPORT inflatePrime(strm, bits, value)
+z_streamp strm;
+int bits;
+int value;
+{
+ struct inflate_state FAR *state;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if (bits < 0) {
+ state->hold = 0;
+ state->bits = 0;
+ return Z_OK;
+ }
+ if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR;
+ value &= (1L << bits) - 1;
+ state->hold += value << state->bits;
+ state->bits += bits;
+ return Z_OK;
+}
+
+/*
+ Return state with length and distance decoding tables and index sizes set to
+ fixed code decoding. Normally this returns fixed tables from inffixed.h.
+ If BUILDFIXED is defined, then instead this routine builds the tables the
+ first time it's called, and returns those tables the first time and
+ thereafter. This reduces the size of the code by about 2K bytes, in
+ exchange for a little execution time. However, BUILDFIXED should not be
+ used for threaded applications, since the rewriting of the tables and the
+ static virgin flag may not be thread-safe.
+ */
+local void fixedtables(state)
+struct inflate_state FAR *state;
+{
+#ifdef BUILDFIXED
+ static int virgin = 1;
+ static code *lenfix, *distfix;
+ static code fixed[544];
+
+ /* build fixed huffman tables if first call (may not be thread safe) */
+ if (virgin) {
+ unsigned sym, bits;
+ static code *next;
+
+ /* literal/length table */
+ sym = 0;
+ while (sym < 144) state->lens[sym++] = 8;
+ while (sym < 256) state->lens[sym++] = 9;
+ while (sym < 280) state->lens[sym++] = 7;
+ while (sym < 288) state->lens[sym++] = 8;
+ next = fixed;
+ lenfix = next;
+ bits = 9;
+ inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work);
+
+ /* distance table */
+ sym = 0;
+ while (sym < 32) state->lens[sym++] = 5;
+ distfix = next;
+ bits = 5;
+ inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work);
+
+ /* do this just once */
+ virgin = 0;
+ }
+#else /* !BUILDFIXED */
+# include "inffixed.h"
+#endif /* BUILDFIXED */
+ state->lencode = lenfix;
+ state->lenbits = 9;
+ state->distcode = distfix;
+ state->distbits = 5;
+}
+
+#ifdef MAKEFIXED
+#include <stdio.h>
+
+/*
+ Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also
+ defines BUILDFIXED, so the tables are built on the fly. makefixed() writes
+ those tables to stdout, which would be piped to inffixed.h. A small program
+ can simply call makefixed to do this:
+
+ void makefixed(void);
+
+ int main(void)
+ {
+ makefixed();
+ return 0;
+ }
+
+ Then that can be linked with zlib built with MAKEFIXED defined and run:
+
+ a.out > inffixed.h
+ */
+void makefixed()
+{
+ unsigned low, size;
+ struct inflate_state state;
+
+ fixedtables(&state);
+ puts(" /* inffixed.h -- table for decoding fixed codes");
+ puts(" * Generated automatically by makefixed().");
+ puts(" */");
+ puts("");
+ puts(" /* WARNING: this file should *not* be used by applications.");
+ puts(" It is part of the implementation of this library and is");
+ puts(" subject to change. Applications should only use zlib.h.");
+ puts(" */");
+ puts("");
+ size = 1U << 9;
+ printf(" static const code lenfix[%u] = {", size);
+ low = 0;
+ for (;;) {
+ if ((low % 7) == 0) printf("\n ");
+ printf("{%u,%u,%d}", state.lencode[low].op, state.lencode[low].bits,
+ state.lencode[low].val);
+ if (++low == size) break;
+ putchar(',');
+ }
+ puts("\n };");
+ size = 1U << 5;
+ printf("\n static const code distfix[%u] = {", size);
+ low = 0;
+ for (;;) {
+ if ((low % 6) == 0) printf("\n ");
+ printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits,
+ state.distcode[low].val);
+ if (++low == size) break;
+ putchar(',');
+ }
+ puts("\n };");
+}
+#endif /* MAKEFIXED */
+
+/*
+ Update the window with the last wsize (normally 32K) bytes written before
+ returning. If window does not exist yet, create it. This is only called
+ when a window is already in use, or when output has been written during this
+ inflate call, but the end of the deflate stream has not been reached yet.
+ It is also called to create a window for dictionary data when a dictionary
+ is loaded.
+
+ Providing output buffers larger than 32K to inflate() should provide a speed
+ advantage, since only the last 32K of output is copied to the sliding window
+ upon return from inflate(), and since all distances after the first 32K of
+ output will fall in the output data, making match copies simpler and faster.
+ The advantage may be dependent on the size of the processor's data caches.
+ */
+local int updatewindow(strm, out)
+z_streamp strm;
+unsigned out;
+{
+ struct inflate_state FAR *state;
+ unsigned copy, dist;
+
+ state = (struct inflate_state FAR *)strm->state;
+
+ /* if it hasn't been done already, allocate space for the window */
+ if (state->window == Z_NULL) {
+ state->window = (unsigned char FAR *)
+ ZALLOC(strm, 1U << state->wbits,
+ sizeof(unsigned char));
+ if (state->window == Z_NULL) return 1;
+ }
+
+ /* if window not in use yet, initialize */
+ if (state->wsize == 0) {
+ state->wsize = 1U << state->wbits;
+ state->wnext = 0;
+ state->whave = 0;
+ }
+
+ /* copy state->wsize or less output bytes into the circular window */
+ copy = out - strm->avail_out;
+ if (copy >= state->wsize) {
+ zmemcpy(state->window, strm->next_out - state->wsize, state->wsize);
+ state->wnext = 0;
+ state->whave = state->wsize;
+ }
+ else {
+ dist = state->wsize - state->wnext;
+ if (dist > copy) dist = copy;
+ zmemcpy(state->window + state->wnext, strm->next_out - copy, dist);
+ copy -= dist;
+ if (copy) {
+ zmemcpy(state->window, strm->next_out - copy, copy);
+ state->wnext = copy;
+ state->whave = state->wsize;
+ }
+ else {
+ state->wnext += dist;
+ if (state->wnext == state->wsize) state->wnext = 0;
+ if (state->whave < state->wsize) state->whave += dist;
+ }
+ }
+ return 0;
+}
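+
+/*
+   A worked example of the circular copy above (hypothetical numbers):
+   with wsize == 32768, wnext == 30000, and copy == 5000, the first
+   zmemcpy() places dist == 2768 bytes at window + 30000, the second wraps
+   the remaining 2232 bytes to the start of the window, and the state ends
+   with wnext == 2232 and whave == wsize (the window stays full from then
+   on).
+ */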
+
+/* Macros for inflate(): */
+
+/* check function to use adler32() for zlib or crc32() for gzip */
+#ifdef GUNZIP
+# define UPDATE(check, buf, len) \
+ (state->flags ? crc32(check, buf, len) : adler32(check, buf, len))
+#else
+# define UPDATE(check, buf, len) adler32(check, buf, len)
+#endif
+
+/* check macros for header crc */
+#ifdef GUNZIP
+# define CRC2(check, word) \
+ do { \
+ hbuf[0] = (unsigned char)(word); \
+ hbuf[1] = (unsigned char)((word) >> 8); \
+ check = crc32(check, hbuf, 2); \
+ } while (0)
+
+# define CRC4(check, word) \
+ do { \
+ hbuf[0] = (unsigned char)(word); \
+ hbuf[1] = (unsigned char)((word) >> 8); \
+ hbuf[2] = (unsigned char)((word) >> 16); \
+ hbuf[3] = (unsigned char)((word) >> 24); \
+ check = crc32(check, hbuf, 4); \
+ } while (0)
+#endif
+
+/* Load registers with state in inflate() for speed */
+#define LOAD() \
+ do { \
+ put = strm->next_out; \
+ left = strm->avail_out; \
+ next = strm->next_in; \
+ have = strm->avail_in; \
+ hold = state->hold; \
+ bits = state->bits; \
+ } while (0)
+
+/* Restore state from registers in inflate() */
+#define RESTORE() \
+ do { \
+ strm->next_out = put; \
+ strm->avail_out = left; \
+ strm->next_in = next; \
+ strm->avail_in = have; \
+ state->hold = hold; \
+ state->bits = bits; \
+ } while (0)
+
+/* Clear the input bit accumulator */
+#define INITBITS() \
+ do { \
+ hold = 0; \
+ bits = 0; \
+ } while (0)
+
+/* Get a byte of input into the bit accumulator, or return from inflate()
+ if there is no input available. */
+#define PULLBYTE() \
+ do { \
+ if (have == 0) goto inf_leave; \
+ have--; \
+ hold += (unsigned long)(*next++) << bits; \
+ bits += 8; \
+ } while (0)
+
+/* Assure that there are at least n bits in the bit accumulator. If there is
+ not enough available input to do that, then return from inflate(). */
+#define NEEDBITS(n) \
+ do { \
+ while (bits < (unsigned)(n)) \
+ PULLBYTE(); \
+ } while (0)
+
+/* Return the low n bits of the bit accumulator (n < 16) */
+#define BITS(n) \
+ ((unsigned)hold & ((1U << (n)) - 1))
+
+/* Remove n bits from the bit accumulator */
+#define DROPBITS(n) \
+ do { \
+ hold >>= (n); \
+ bits -= (unsigned)(n); \
+ } while (0)
+
+/* Remove zero to seven bits as needed to go to a byte boundary */
+#define BYTEBITS() \
+ do { \
+ hold >>= bits & 7; \
+ bits -= bits & 7; \
+ } while (0)
+
+/* Reverse the bytes in a 32-bit value */
+#define REVERSE(q) \
+ ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
+ (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
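+/* For example, REVERSE(0x12345678UL) yields 0x78563412UL. */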
+
+/*
+ inflate() uses a state machine to process as much input data and generate as
+ much output data as possible before returning. The state machine is
+ structured roughly as follows:
+
+ for (;;) switch (state) {
+ ...
+ case STATEn:
+ if (not enough input data or output space to make progress)
+ return;
+ ... make progress ...
+ state = STATEm;
+ break;
+ ...
+ }
+
+ so when inflate() is called again, the same case is attempted again, and
+ if the appropriate resources are provided, the machine proceeds to the
+ next state. The NEEDBITS() macro is usually the way the state evaluates
+ whether it can proceed or should return. NEEDBITS() does the return if
+ the requested bits are not available. The typical use of the BITS macros
+ is:
+
+ NEEDBITS(n);
+ ... do something with BITS(n) ...
+ DROPBITS(n);
+
+ where NEEDBITS(n) either returns from inflate() if there isn't enough
+ input left to load n bits into the accumulator, or it continues. BITS(n)
+ gives the low n bits in the accumulator. When done, DROPBITS(n) drops
+ the low n bits off the accumulator. INITBITS() clears the accumulator
+ and sets the number of available bits to zero. BYTEBITS() discards just
+ enough bits to put the accumulator on a byte boundary. After BYTEBITS()
+ and a NEEDBITS(8), then BITS(8) would return the next byte in the stream.
+
+ NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return
+ if there is no input available. The decoding of variable length codes uses
+ PULLBYTE() directly in order to pull just enough bytes to decode the next
+ code, and no more.
+
+ Some states loop until they get enough input, making sure that enough
+ state information is maintained to continue the loop where it left off
+ if NEEDBITS() returns in the loop. For example, want, need, and keep
+ would all have to actually be part of the saved state in case NEEDBITS()
+ returns:
+
+ case STATEw:
+ while (want < need) {
+ NEEDBITS(n);
+ keep[want++] = BITS(n);
+ DROPBITS(n);
+ }
+ state = STATEx;
+ case STATEx:
+
+ As shown above, if the next state is also the next case, then the break
+ is omitted.
+
+ A state may also return if there is not enough output space available to
+ complete that state. Those states are copying stored data, writing a
+ literal byte, and copying a matching string.
+
+ When returning, a "goto inf_leave" is used to update the total counters,
+ update the check value, and determine whether any progress has been made
+ during that inflate() call in order to return the proper return code.
+ Progress is defined as a change in either strm->avail_in or strm->avail_out.
+ When there is a window, goto inf_leave will update the window with the last
+ output written. If a goto inf_leave occurs in the middle of decompression
+ and there is no window currently, goto inf_leave will create one and copy
+ output to the window for the next call of inflate().
+
+ In this implementation, the flush parameter of inflate() only affects the
+ return code (per zlib.h). inflate() always writes as much as possible to
+ strm->next_out, given the space available and the provided input--the effect
+ documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers
+ the allocation of and copying into a sliding window until necessary, which
+ provides the effect documented in zlib.h for Z_FINISH when the entire input
+ stream is available. So the only thing the flush parameter actually does is:
+ when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it
+ will return Z_BUF_ERROR if it has not reached the end of the stream.
+ */
+
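+/*
+   For reference, a minimal sketch of a calling loop as seen from an
+   application (a hedged illustration, not part of this file: it assumes a
+   z_stream "strm" already set up with inflateInit() and an input source;
+   the buffer name outbuf is hypothetical):
+
+      unsigned char outbuf[16384];
+      int ret;
+      do {
+          if (strm.avail_in == 0) {
+              ... refill strm.next_in / strm.avail_in from the input ...
+          }
+          strm.next_out = outbuf;
+          strm.avail_out = sizeof(outbuf);
+          ret = inflate(&strm, Z_NO_FLUSH);
+          if (ret != Z_OK && ret != Z_STREAM_END) break;     /* error */
+          ... consume sizeof(outbuf) - strm.avail_out bytes of output ...
+      } while (ret != Z_STREAM_END);
+
+   Each call resumes in the state machine case where the previous call
+   returned, as described above.
+ */
+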
+int ZEXPORT inflate(strm, flush)
+z_streamp strm;
+int flush;
+{
+ struct inflate_state FAR *state;
+ unsigned char FAR *next; /* next input */
+ unsigned char FAR *put; /* next output */
+ unsigned have, left; /* available input and output */
+ unsigned long hold; /* bit buffer */
+ unsigned bits; /* bits in bit buffer */
+ unsigned in, out; /* save starting available input and output */
+ unsigned copy; /* number of stored or match bytes to copy */
+ unsigned char FAR *from; /* where to copy match bytes from */
+ code here; /* current decoding table entry */
+ code last; /* parent table entry */
+ unsigned len; /* length to copy for repeats, bits to drop */
+ int ret; /* return code */
+#ifdef GUNZIP
+ unsigned char hbuf[4]; /* buffer for gzip header crc calculation */
+#endif
+ static const unsigned short order[19] = /* permutation of code lengths */
+ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+ if (strm == Z_NULL || strm->state == Z_NULL || strm->next_out == Z_NULL ||
+ (strm->next_in == Z_NULL && strm->avail_in != 0))
+ return Z_STREAM_ERROR;
+
+ state = (struct inflate_state FAR *)strm->state;
+ if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
+ LOAD();
+ in = have;
+ out = left;
+ ret = Z_OK;
+ for (;;)
+ switch (state->mode) {
+ case HEAD:
+ if (state->wrap == 0) {
+ state->mode = TYPEDO;
+ break;
+ }
+ NEEDBITS(16);
+#ifdef GUNZIP
+ if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */
+ state->check = crc32(0L, Z_NULL, 0);
+ CRC2(state->check, hold);
+ INITBITS();
+ state->mode = FLAGS;
+ break;
+ }
+ state->flags = 0; /* expect zlib header */
+ if (state->head != Z_NULL)
+ state->head->done = -1;
+ if (!(state->wrap & 1) || /* check if zlib header allowed */
+#else
+ if (
+#endif
+ ((BITS(8) << 8) + (hold >> 8)) % 31) {
+ strm->msg = (char *)"incorrect header check";
+ state->mode = BAD;
+ break;
+ }
+ if (BITS(4) != Z_DEFLATED) {
+ strm->msg = (char *)"unknown compression method";
+ state->mode = BAD;
+ break;
+ }
+ DROPBITS(4);
+ len = BITS(4) + 8;
+ if (state->wbits == 0)
+ state->wbits = len;
+ else if (len > state->wbits) {
+ strm->msg = (char *)"invalid window size";
+ state->mode = BAD;
+ break;
+ }
+ state->dmax = 1U << len;
+ Tracev((stderr, "inflate: zlib header ok\n"));
+ strm->adler = state->check = adler32(0L, Z_NULL, 0);
+ state->mode = hold & 0x200 ? DICTID : TYPE;
+ INITBITS();
+ break;
+#ifdef GUNZIP
+ case FLAGS:
+ NEEDBITS(16);
+ state->flags = (int)(hold);
+ if ((state->flags & 0xff) != Z_DEFLATED) {
+ strm->msg = (char *)"unknown compression method";
+ state->mode = BAD;
+ break;
+ }
+ if (state->flags & 0xe000) {
+ strm->msg = (char *)"unknown header flags set";
+ state->mode = BAD;
+ break;
+ }
+ if (state->head != Z_NULL)
+ state->head->text = (int)((hold >> 8) & 1);
+ if (state->flags & 0x0200) CRC2(state->check, hold);
+ INITBITS();
+ state->mode = TIME;
+ case TIME:
+ NEEDBITS(32);
+ if (state->head != Z_NULL)
+ state->head->time = hold;
+ if (state->flags & 0x0200) CRC4(state->check, hold);
+ INITBITS();
+ state->mode = OS;
+ case OS:
+ NEEDBITS(16);
+ if (state->head != Z_NULL) {
+ state->head->xflags = (int)(hold & 0xff);
+ state->head->os = (int)(hold >> 8);
+ }
+ if (state->flags & 0x0200) CRC2(state->check, hold);
+ INITBITS();
+ state->mode = EXLEN;
+ case EXLEN:
+ if (state->flags & 0x0400) {
+ NEEDBITS(16);
+ state->length = (unsigned)(hold);
+ if (state->head != Z_NULL)
+ state->head->extra_len = (unsigned)hold;
+ if (state->flags & 0x0200) CRC2(state->check, hold);
+ INITBITS();
+ }
+ else if (state->head != Z_NULL)
+ state->head->extra = Z_NULL;
+ state->mode = EXTRA;
+ case EXTRA:
+ if (state->flags & 0x0400) {
+ copy = state->length;
+ if (copy > have) copy = have;
+ if (copy) {
+ if (state->head != Z_NULL &&
+ state->head->extra != Z_NULL) {
+ len = state->head->extra_len - state->length;
+ zmemcpy(state->head->extra + len, next,
+ len + copy > state->head->extra_max ?
+ state->head->extra_max - len : copy);
+ }
+ if (state->flags & 0x0200)
+ state->check = crc32(state->check, next, copy);
+ have -= copy;
+ next += copy;
+ state->length -= copy;
+ }
+ if (state->length) goto inf_leave;
+ }
+ state->length = 0;
+ state->mode = NAME;
+ case NAME:
+ if (state->flags & 0x0800) {
+ if (have == 0) goto inf_leave;
+ copy = 0;
+ do {
+ len = (unsigned)(next[copy++]);
+ if (state->head != Z_NULL &&
+ state->head->name != Z_NULL &&
+ state->length < state->head->name_max)
+ state->head->name[state->length++] = len;
+ } while (len && copy < have);
+ if (state->flags & 0x0200)
+ state->check = crc32(state->check, next, copy);
+ have -= copy;
+ next += copy;
+ if (len) goto inf_leave;
+ }
+ else if (state->head != Z_NULL)
+ state->head->name = Z_NULL;
+ state->length = 0;
+ state->mode = COMMENT;
+ case COMMENT:
+ if (state->flags & 0x1000) {
+ if (have == 0) goto inf_leave;
+ copy = 0;
+ do {
+ len = (unsigned)(next[copy++]);
+ if (state->head != Z_NULL &&
+ state->head->comment != Z_NULL &&
+ state->length < state->head->comm_max)
+ state->head->comment[state->length++] = len;
+ } while (len && copy < have);
+ if (state->flags & 0x0200)
+ state->check = crc32(state->check, next, copy);
+ have -= copy;
+ next += copy;
+ if (len) goto inf_leave;
+ }
+ else if (state->head != Z_NULL)
+ state->head->comment = Z_NULL;
+ state->mode = HCRC;
+ case HCRC:
+ if (state->flags & 0x0200) {
+ NEEDBITS(16);
+ if (hold != (state->check & 0xffff)) {
+ strm->msg = (char *)"header crc mismatch";
+ state->mode = BAD;
+ break;
+ }
+ INITBITS();
+ }
+ if (state->head != Z_NULL) {
+ state->head->hcrc = (int)((state->flags >> 9) & 1);
+ state->head->done = 1;
+ }
+ strm->adler = state->check = crc32(0L, Z_NULL, 0);
+ state->mode = TYPE;
+ break;
+#endif
+ case DICTID:
+ NEEDBITS(32);
+ strm->adler = state->check = REVERSE(hold);
+ INITBITS();
+ state->mode = DICT;
+ case DICT:
+ if (state->havedict == 0) {
+ RESTORE();
+ return Z_NEED_DICT;
+ }
+ strm->adler = state->check = adler32(0L, Z_NULL, 0);
+ state->mode = TYPE;
+ case TYPE:
+ if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave;
+ case TYPEDO:
+ if (state->last) {
+ BYTEBITS();
+ state->mode = CHECK;
+ break;
+ }
+ NEEDBITS(3);
+ state->last = BITS(1);
+ DROPBITS(1);
+ switch (BITS(2)) {
+ case 0: /* stored block */
+ Tracev((stderr, "inflate: stored block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = STORED;
+ break;
+ case 1: /* fixed block */
+ fixedtables(state);
+ Tracev((stderr, "inflate: fixed codes block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = LEN_; /* decode codes */
+ if (flush == Z_TREES) {
+ DROPBITS(2);
+ goto inf_leave;
+ }
+ break;
+ case 2: /* dynamic block */
+ Tracev((stderr, "inflate: dynamic codes block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = TABLE;
+ break;
+ case 3:
+ strm->msg = (char *)"invalid block type";
+ state->mode = BAD;
+ }
+ DROPBITS(2);
+ break;
+ case STORED:
+ BYTEBITS(); /* go to byte boundary */
+ NEEDBITS(32);
+ if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
+ strm->msg = (char *)"invalid stored block lengths";
+ state->mode = BAD;
+ break;
+ }
+ state->length = (unsigned)hold & 0xffff;
+ Tracev((stderr, "inflate: stored length %u\n",
+ state->length));
+ INITBITS();
+ state->mode = COPY_;
+ if (flush == Z_TREES) goto inf_leave;
+ case COPY_:
+ state->mode = COPY;
+ case COPY:
+ copy = state->length;
+ if (copy) {
+ if (copy > have) copy = have;
+ if (copy > left) copy = left;
+ if (copy == 0) goto inf_leave;
+ zmemcpy(put, next, copy);
+ have -= copy;
+ next += copy;
+ left -= copy;
+ put += copy;
+ state->length -= copy;
+ break;
+ }
+ Tracev((stderr, "inflate: stored end\n"));
+ state->mode = TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14);
+ state->nlen = BITS(5) + 257;
+ DROPBITS(5);
+ state->ndist = BITS(5) + 1;
+ DROPBITS(5);
+ state->ncode = BITS(4) + 4;
+ DROPBITS(4);
+#ifndef PKZIP_BUG_WORKAROUND
+ if (state->nlen > 286 || state->ndist > 30) {
+ strm->msg = (char *)"too many length or distance symbols";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ Tracev((stderr, "inflate: table sizes ok\n"));
+ state->have = 0;
+ state->mode = LENLENS;
+ case LENLENS:
+ while (state->have < state->ncode) {
+ NEEDBITS(3);
+ state->lens[order[state->have++]] = (unsigned short)BITS(3);
+ DROPBITS(3);
+ }
+ while (state->have < 19)
+ state->lens[order[state->have++]] = 0;
+ state->next = state->codes;
+ state->lencode = (code const FAR *)(state->next);
+ state->lenbits = 7;
+ ret = inflate_table(CODES, state->lens, 19, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid code lengths set";
+ state->mode = BAD;
+ break;
+ }
+ Tracev((stderr, "inflate: code lengths ok\n"));
+ state->have = 0;
+ state->mode = CODELENS;
+ case CODELENS:
+ while (state->have < state->nlen + state->ndist) {
+ for (;;) {
+ here = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (here.val < 16) {
+ NEEDBITS(here.bits);
+ DROPBITS(here.bits);
+ state->lens[state->have++] = here.val;
+ }
+ else {
+ if (here.val == 16) {
+ NEEDBITS(here.bits + 2);
+ DROPBITS(here.bits);
+ if (state->have == 0) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ len = state->lens[state->have - 1];
+ copy = 3 + BITS(2);
+ DROPBITS(2);
+ }
+ else if (here.val == 17) {
+ NEEDBITS(here.bits + 3);
+ DROPBITS(here.bits);
+ len = 0;
+ copy = 3 + BITS(3);
+ DROPBITS(3);
+ }
+ else {
+ NEEDBITS(here.bits + 7);
+ DROPBITS(here.bits);
+ len = 0;
+ copy = 11 + BITS(7);
+ DROPBITS(7);
+ }
+ if (state->have + copy > state->nlen + state->ndist) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ while (copy--)
+ state->lens[state->have++] = (unsigned short)len;
+ }
+ }
+
+ /* handle error breaks in while */
+ if (state->mode == BAD) break;
+
+ /* check for end-of-block code (better have one) */
+ if (state->lens[256] == 0) {
+ strm->msg = (char *)"invalid code -- missing end-of-block";
+ state->mode = BAD;
+ break;
+ }
+
+ /* build code tables -- note: do not change the lenbits or distbits
+ values here (9 and 6) without reading the comments in inftrees.h
+ concerning the ENOUGH constants, which depend on those values */
+ state->next = state->codes;
+ state->lencode = (code const FAR *)(state->next);
+ state->lenbits = 9;
+ ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid literal/lengths set";
+ state->mode = BAD;
+ break;
+ }
+ state->distcode = (code const FAR *)(state->next);
+ state->distbits = 6;
+ ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
+ &(state->next), &(state->distbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid distances set";
+ state->mode = BAD;
+ break;
+ }
+ Tracev((stderr, "inflate: codes ok\n"));
+ state->mode = LEN_;
+ if (flush == Z_TREES) goto inf_leave;
+ case LEN_:
+ state->mode = LEN;
+ case LEN:
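+ /* enter the fast decoder only when its entry assumptions hold:
+    inflate_fast() expects at least 6 bytes of input and at least 258
+    bytes (the maximum match length) of output space available */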
+ if (have >= 6 && left >= 258) {
+ RESTORE();
+ inflate_fast(strm, out);
+ LOAD();
+ if (state->mode == TYPE)
+ state->back = -1;
+ break;
+ }
+ state->back = 0;
+ for (;;) {
+ here = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (here.op && (here.op & 0xf0) == 0) {
+ last = here;
+ for (;;) {
+ here = state->lencode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ state->back += last.bits;
+ }
+ DROPBITS(here.bits);
+ state->back += here.bits;
+ state->length = (unsigned)here.val;
+ if ((int)(here.op) == 0) {
+ Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", here.val));
+ state->mode = LIT;
+ break;
+ }
+ if (here.op & 32) {
+ Tracevv((stderr, "inflate: end of block\n"));
+ state->back = -1;
+ state->mode = TYPE;
+ break;
+ }
+ if (here.op & 64) {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+ state->extra = (unsigned)(here.op) & 15;
+ state->mode = LENEXT;
+ case LENEXT:
+ if (state->extra) {
+ NEEDBITS(state->extra);
+ state->length += BITS(state->extra);
+ DROPBITS(state->extra);
+ state->back += state->extra;
+ }
+ Tracevv((stderr, "inflate: length %u\n", state->length));
+ state->was = state->length;
+ state->mode = DIST;
+ case DIST:
+ for (;;) {
+ here = state->distcode[BITS(state->distbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if ((here.op & 0xf0) == 0) {
+ last = here;
+ for (;;) {
+ here = state->distcode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ state->back += last.bits;
+ }
+ DROPBITS(here.bits);
+ state->back += here.bits;
+ if (here.op & 64) {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ state->offset = (unsigned)here.val;
+ state->extra = (unsigned)(here.op) & 15;
+ state->mode = DISTEXT;
+ case DISTEXT:
+ if (state->extra) {
+ NEEDBITS(state->extra);
+ state->offset += BITS(state->extra);
+ DROPBITS(state->extra);
+ state->back += state->extra;
+ }
+#ifdef INFLATE_STRICT
+ if (state->offset > state->dmax) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ Tracevv((stderr, "inflate: distance %u\n", state->offset));
+ state->mode = MATCH;
+ case MATCH:
+ if (left == 0) goto inf_leave;
+ copy = out - left;
+ if (state->offset > copy) { /* copy from window */
+ copy = state->offset - copy;
+ if (copy > state->whave) {
+ if (state->sane) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
+ Trace((stderr, "inflate.c too far\n"));
+ copy -= state->whave;
+ if (copy > state->length) copy = state->length;
+ if (copy > left) copy = left;
+ left -= copy;
+ state->length -= copy;
+ do {
+ *put++ = 0;
+ } while (--copy);
+ if (state->length == 0) state->mode = LEN;
+ break;
+#endif
+ }
+ if (copy > state->wnext) {
+ copy -= state->wnext;
+ from = state->window + (state->wsize - copy);
+ }
+ else
+ from = state->window + (state->wnext - copy);
+ if (copy > state->length) copy = state->length;
+ }
+ else { /* copy from output */
+ from = put - state->offset;
+ copy = state->length;
+ }
+ if (copy > left) copy = left;
+ left -= copy;
+ state->length -= copy;
+ do {
+ *put++ = *from++;
+ } while (--copy);
+ if (state->length == 0) state->mode = LEN;
+ break;
+ case LIT:
+ if (left == 0) goto inf_leave;
+ *put++ = (unsigned char)(state->length);
+ left--;
+ state->mode = LEN;
+ break;
+ case CHECK:
+ if (state->wrap) {
+ NEEDBITS(32);
+ out -= left;
+ strm->total_out += out;
+ state->total += out;
+ if (out)
+ strm->adler = state->check =
+ UPDATE(state->check, put - out, out);
+ out = left;
+ if ((
+#ifdef GUNZIP
+ state->flags ? hold :
+#endif
+ REVERSE(hold)) != state->check) {
+ strm->msg = (char *)"incorrect data check";
+ state->mode = BAD;
+ break;
+ }
+ INITBITS();
+ Tracev((stderr, "inflate: check matches trailer\n"));
+ }
+#ifdef GUNZIP
+ state->mode = LENGTH;
+ case LENGTH:
+ if (state->wrap && state->flags) {
+ NEEDBITS(32);
+ if (hold != (state->total & 0xffffffffUL)) {
+ strm->msg = (char *)"incorrect length check";
+ state->mode = BAD;
+ break;
+ }
+ INITBITS();
+ Tracev((stderr, "inflate: length matches trailer\n"));
+ }
+#endif
+ state->mode = DONE;
+ case DONE:
+ ret = Z_STREAM_END;
+ goto inf_leave;
+ case BAD:
+ ret = Z_DATA_ERROR;
+ goto inf_leave;
+ case MEM:
+ return Z_MEM_ERROR;
+ case SYNC:
+ default:
+ return Z_STREAM_ERROR;
+ }
+
+ /*
+ Return from inflate(), updating the total counts and the check value.
+ If there was no progress during the inflate() call, return a buffer
+ error. Call updatewindow() to create and/or update the window state.
+ Note: a memory error from inflate() is non-recoverable.
+ */
+ inf_leave:
+ RESTORE();
+ if (state->wsize || (state->mode < CHECK && out != strm->avail_out))
+ if (updatewindow(strm, out)) {
+ state->mode = MEM;
+ return Z_MEM_ERROR;
+ }
+ in -= strm->avail_in;
+ out -= strm->avail_out;
+ strm->total_in += in;
+ strm->total_out += out;
+ state->total += out;
+ if (state->wrap && out)
+ strm->adler = state->check =
+ UPDATE(state->check, strm->next_out - out, out);
+ strm->data_type = state->bits + (state->last ? 64 : 0) +
+ (state->mode == TYPE ? 128 : 0) +
+ (state->mode == LEN_ || state->mode == COPY_ ? 256 : 0);
+ if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
+ ret = Z_BUF_ERROR;
+ return ret;
+}
+
+int ZEXPORT inflateEnd(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+ if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0)
+ return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if (state->window != Z_NULL) ZFREE(strm, state->window);
+ ZFREE(strm, strm->state);
+ strm->state = Z_NULL;
+ Tracev((stderr, "inflate: end\n"));
+ return Z_OK;
+}
+
+int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength)
+z_streamp strm;
+const Bytef *dictionary;
+uInt dictLength;
+{
+ struct inflate_state FAR *state;
+ unsigned long id;
+
+ /* check state */
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if (state->wrap != 0 && state->mode != DICT)
+ return Z_STREAM_ERROR;
+
+ /* check for correct dictionary id */
+ if (state->mode == DICT) {
+ id = adler32(0L, Z_NULL, 0);
+ id = adler32(id, dictionary, dictLength);
+ if (id != state->check)
+ return Z_DATA_ERROR;
+ }
+
+ /* copy dictionary to window */
+ if (updatewindow(strm, strm->avail_out)) {
+ state->mode = MEM;
+ return Z_MEM_ERROR;
+ }
+ if (dictLength > state->wsize) {
+ zmemcpy(state->window, dictionary + dictLength - state->wsize,
+ state->wsize);
+ state->whave = state->wsize;
+ }
+ else {
+ zmemcpy(state->window + state->wsize - dictLength, dictionary,
+ dictLength);
+ state->whave = dictLength;
+ }
+ state->havedict = 1;
+ Tracev((stderr, "inflate: dictionary set\n"));
+ return Z_OK;
+}
+
+int ZEXPORT inflateGetHeader(strm, head)
+z_streamp strm;
+gz_headerp head;
+{
+ struct inflate_state FAR *state;
+
+ /* check state */
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if ((state->wrap & 2) == 0) return Z_STREAM_ERROR;
+
+ /* save header structure */
+ state->head = head;
+ head->done = 0;
+ return Z_OK;
+}
+
+/*
+ Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found
+ or when out of input. When called, *have is the number of pattern bytes
+ found in order so far, in 0..3. On return *have is updated to the new
+ state. If on return *have equals four, then the pattern was found and the
+ return value is how many bytes were read including the last byte of the
+ pattern. If *have is less than four, then the pattern has not been found
+ yet and the return value is len. In the latter case, syncsearch() can be
+ called again with more data and the *have state. *have is initialized to
+ zero for the first call.
+ */
+local unsigned syncsearch(have, buf, len)
+unsigned FAR *have;
+unsigned char FAR *buf;
+unsigned len;
+{
+ unsigned got;
+ unsigned next;
+
+ got = *have;
+ next = 0;
+ while (next < len && got < 4) {
+ if ((int)(buf[next]) == (got < 2 ? 0 : 0xff))
+ got++;
+ else if (buf[next])
+ got = 0;
+ else
+ got = 4 - got;
+ next++;
+ }
+ *have = got;
+ return next;
+}
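+
+/*
+   A worked example of resuming across calls (hypothetical input): with
+   *have starting at zero, scanning {0x12, 0x00, 0x00} consumes all three
+   bytes and leaves *have == 2 (two pattern zeros seen). A following call
+   on {0xff, 0xff, 0x55} returns 2 with *have == 4, since the pattern
+   completes at the second byte and the 0x55 is left unread.
+ */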
+
+int ZEXPORT inflateSync(strm)
+z_streamp strm;
+{
+ unsigned len; /* number of bytes to look at or looked at */
+ unsigned long in, out; /* temporary to save total_in and total_out */
+ unsigned char buf[4]; /* to restore bit buffer to byte string */
+ struct inflate_state FAR *state;
+
+ /* check parameters */
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR;
+
+ /* if first time, start search in bit buffer */
+ if (state->mode != SYNC) {
+ state->mode = SYNC;
+ state->hold <<= state->bits & 7;
+ state->bits -= state->bits & 7;
+ len = 0;
+ while (state->bits >= 8) {
+ buf[len++] = (unsigned char)(state->hold);
+ state->hold >>= 8;
+ state->bits -= 8;
+ }
+ state->have = 0;
+ syncsearch(&(state->have), buf, len);
+ }
+
+ /* search available input */
+ len = syncsearch(&(state->have), strm->next_in, strm->avail_in);
+ strm->avail_in -= len;
+ strm->next_in += len;
+ strm->total_in += len;
+
+ /* return no joy or set up to restart inflate() on a new block */
+ if (state->have != 4) return Z_DATA_ERROR;
+ in = strm->total_in; out = strm->total_out;
+ inflateReset(strm);
+ strm->total_in = in; strm->total_out = out;
+ state->mode = TYPE;
+ return Z_OK;
+}
+
+/*
+ Returns true if inflate is currently at the end of a block generated by
+ Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ implementation to provide an additional safety check. PPP uses
+ Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
+ block. When decompressing, PPP checks that at the end of an input packet,
+ inflate() is waiting for these length bytes.
+ */
+int ZEXPORT inflateSyncPoint(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ return state->mode == STORED && state->bits == 0;
+}
+
+int ZEXPORT inflateCopy(dest, source)
+z_streamp dest;
+z_streamp source;
+{
+ struct inflate_state FAR *state;
+ struct inflate_state FAR *copy;
+ unsigned char FAR *window;
+ unsigned wsize;
+
+ /* check input */
+ if (dest == Z_NULL || source == Z_NULL || source->state == Z_NULL ||
+ source->zalloc == (alloc_func)0 || source->zfree == (free_func)0)
+ return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)source->state;
+
+ /* allocate space */
+ copy = (struct inflate_state FAR *)
+ ZALLOC(source, 1, sizeof(struct inflate_state));
+ if (copy == Z_NULL) return Z_MEM_ERROR;
+ window = Z_NULL;
+ if (state->window != Z_NULL) {
+ window = (unsigned char FAR *)
+ ZALLOC(source, 1U << state->wbits, sizeof(unsigned char));
+ if (window == Z_NULL) {
+ ZFREE(source, copy);
+ return Z_MEM_ERROR;
+ }
+ }
+
+ /* copy state */
+ zmemcpy(dest, source, sizeof(z_stream));
+ zmemcpy(copy, state, sizeof(struct inflate_state));
+ if (state->lencode >= state->codes &&
+ state->lencode <= state->codes + ENOUGH - 1) {
+ copy->lencode = copy->codes + (state->lencode - state->codes);
+ copy->distcode = copy->codes + (state->distcode - state->codes);
+ }
+ copy->next = copy->codes + (state->next - state->codes);
+ if (window != Z_NULL) {
+ wsize = 1U << state->wbits;
+ zmemcpy(window, state->window, wsize);
+ }
+ copy->window = window;
+ dest->state = (struct internal_state FAR *)copy;
+ return Z_OK;
+}
+
+int ZEXPORT inflateUndermine(strm, subvert)
+z_streamp strm;
+int subvert;
+{
+ struct inflate_state FAR *state;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ state->sane = !subvert;
+#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
+ return Z_OK;
+#else
+ state->sane = 1;
+ return Z_DATA_ERROR;
+#endif
+}
+
+long ZEXPORT inflateMark(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return -1L << 16;
+ state = (struct inflate_state FAR *)strm->state;
+ return ((long)(state->back) << 16) +
+ (state->mode == COPY ? state->length :
+ (state->mode == MATCH ? state->was - state->length : 0));
+}
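+
+/*
+   A note on the composite return value (a hedged reading of the code
+   above, with hypothetical variable names): the upper bits carry
+   state->back, and the low 16 bits carry either the stored-block bytes
+   left to copy (COPY) or the bytes of the current match already produced
+   (MATCH):
+
+      long mark = inflateMark(strm);
+      long back = mark >> 16;               /* -1 at a block boundary */
+      unsigned bytes = (unsigned)(mark & 0xffff);
+ */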
--- /dev/null
+/* inflate.h -- internal inflate state definition
+ * Copyright (C) 1995-2009 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* define NO_GZIP when compiling if you want to disable gzip header and
+ trailer decoding by inflate(). NO_GZIP would be used to avoid linking in
+ the crc code when it is not needed. For shared libraries, gzip decoding
+ should be left enabled. */
+#ifndef NO_GZIP
+# define GUNZIP
+#endif
+
+/* Possible inflate modes between inflate() calls */
+typedef enum {
+ HEAD, /* i: waiting for magic header */
+ FLAGS, /* i: waiting for method and flags (gzip) */
+ TIME, /* i: waiting for modification time (gzip) */
+ OS, /* i: waiting for extra flags and operating system (gzip) */
+ EXLEN, /* i: waiting for extra length (gzip) */
+ EXTRA, /* i: waiting for extra bytes (gzip) */
+ NAME, /* i: waiting for end of file name (gzip) */
+ COMMENT, /* i: waiting for end of comment (gzip) */
+ HCRC, /* i: waiting for header crc (gzip) */
+ DICTID, /* i: waiting for dictionary check value */
+ DICT, /* waiting for inflateSetDictionary() call */
+ TYPE, /* i: waiting for type bits, including last-flag bit */
+ TYPEDO, /* i: same, but skip check to exit inflate on new block */
+ STORED, /* i: waiting for stored size (length and complement) */
+ COPY_, /* i/o: same as COPY below, but only first time in */
+ COPY, /* i/o: waiting for input or output to copy stored block */
+ TABLE, /* i: waiting for dynamic block table lengths */
+ LENLENS, /* i: waiting for code length code lengths */
+ CODELENS, /* i: waiting for length/lit and distance code lengths */
+ LEN_, /* i: same as LEN below, but only first time in */
+ LEN, /* i: waiting for length/lit/eob code */
+ LENEXT, /* i: waiting for length extra bits */
+ DIST, /* i: waiting for distance code */
+ DISTEXT, /* i: waiting for distance extra bits */
+ MATCH, /* o: waiting for output space to copy string */
+ LIT, /* o: waiting for output space to write literal */
+ CHECK, /* i: waiting for 32-bit check value */
+ LENGTH, /* i: waiting for 32-bit length (gzip) */
+ DONE, /* finished check, done -- remain here until reset */
+ BAD, /* got a data error -- remain here until reset */
+ MEM, /* got an inflate() memory error -- remain here until reset */
+ SYNC /* looking for synchronization bytes to restart inflate() */
+} inflate_mode;
+
+/*
+ State transitions between above modes -
+
+ (most modes can go to BAD or MEM on error -- not shown for clarity)
+
+ Process header:
+ HEAD -> (gzip) or (zlib) or (raw)
+ (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME -> COMMENT ->
+ HCRC -> TYPE
+ (zlib) -> DICTID or TYPE
+ DICTID -> DICT -> TYPE
+ (raw) -> TYPEDO
+ Read deflate blocks:
+ TYPE -> TYPEDO -> STORED or TABLE or LEN_ or CHECK
+ STORED -> COPY_ -> COPY -> TYPE
+ TABLE -> LENLENS -> CODELENS -> LEN_
+ LEN_ -> LEN
+ Read deflate codes in fixed or dynamic block:
+ LEN -> LENEXT or LIT or TYPE
+ LENEXT -> DIST -> DISTEXT -> MATCH -> LEN
+ LIT -> LEN
+ Process trailer:
+ CHECK -> LENGTH -> DONE
+ */
+
+/* state maintained between inflate() calls. Approximately 10K bytes. */
+struct inflate_state {
+ inflate_mode mode; /* current inflate mode */
+ int last; /* true if processing last block */
+ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */
+ int havedict; /* true if dictionary provided */
+ int flags; /* gzip header method and flags (0 if zlib) */
+ unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */
+ unsigned long check; /* protected copy of check value */
+ unsigned long total; /* protected copy of output count */
+ gz_headerp head; /* where to save gzip header information */
+ /* sliding window */
+ unsigned wbits; /* log base 2 of requested window size */
+ unsigned wsize; /* window size or zero if not using window */
+ unsigned whave; /* valid bytes in the window */
+ unsigned wnext; /* window write index */
+ unsigned char FAR *window; /* allocated sliding window, if needed */
+ /* bit accumulator */
+ unsigned long hold; /* input bit accumulator */
+ unsigned bits; /* number of bits in the bit accumulator "hold" */
+ /* for string and stored block copying */
+ unsigned length; /* literal or length of data to copy */
+ unsigned offset; /* distance back to copy string from */
+ /* for table and code decoding */
+ unsigned extra; /* extra bits needed */
+ /* fixed and dynamic code tables */
+ code const FAR *lencode; /* starting table for length/literal codes */
+ code const FAR *distcode; /* starting table for distance codes */
+ unsigned lenbits; /* index bits for lencode */
+ unsigned distbits; /* index bits for distcode */
+ /* dynamic table building */
+ unsigned ncode; /* number of code length code lengths */
+ unsigned nlen; /* number of length code lengths */
+ unsigned ndist; /* number of distance code lengths */
+ unsigned have; /* number of code lengths in lens[] */
+ code FAR *next; /* next available space in codes[] */
+ unsigned short lens[320]; /* temporary storage for code lengths */
+ unsigned short work[288]; /* work area for code table building */
+ code codes[ENOUGH]; /* space for code tables */
+ int sane; /* if false, allow invalid distance too far */
+ int back; /* bits back of last unprocessed length/lit */
+ unsigned was; /* initial length of match */
+};
--- /dev/null
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995-2010 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zutil.h"
+#include "inftrees.h"
+
+#define MAXBITS 15
+
+const char inflate_copyright[] =
+ " inflate 1.2.5 Copyright 1995-2010 Mark Adler ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+/*
+ Build a set of tables to decode the provided canonical Huffman code.
+ The code lengths are lens[0..codes-1]. The result starts at *table,
+ whose indices are 0..2^bits-1. work is a writable array of at least
+ lens shorts, which is used as a work area. type is the type of code
+ to be generated, CODES, LENS, or DISTS. On return, zero is success,
+ -1 is an invalid code, and +1 means that ENOUGH isn't enough. table
+ on return points to the next available entry's address. bits is the
+ requested root table index bits, and on return it is the actual root
+ table index bits. It will differ if the request is greater than the
+ longest code or if it is less than the shortest code.
+ */
+int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work)
+codetype type;
+unsigned short FAR *lens;
+unsigned codes;
+code FAR * FAR *table;
+unsigned FAR *bits;
+unsigned short FAR *work;
+{
+ unsigned len; /* a code's length in bits */
+ unsigned sym; /* index of code symbols */
+ unsigned min, max; /* minimum and maximum code lengths */
+ unsigned root; /* number of index bits for root table */
+ unsigned curr; /* number of index bits for current table */
+ unsigned drop; /* code bits to drop for sub-table */
+ int left; /* number of prefix codes available */
+ unsigned used; /* code entries in table used */
+ unsigned huff; /* Huffman code */
+ unsigned incr; /* for incrementing code, index */
+ unsigned fill; /* index for replicating entries */
+ unsigned low; /* low bits for current root entry */
+ unsigned mask; /* mask for low root bits */
+ code here; /* table entry for duplication */
+ code FAR *next; /* next available space in table */
+ const unsigned short FAR *base; /* base value table to use */
+ const unsigned short FAR *extra; /* extra bits table to use */
+ int end; /* use base and extra for symbol > end */
+ unsigned short count[MAXBITS+1]; /* number of codes of each length */
+ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */
+ static const unsigned short lbase[31] = { /* Length codes 257..285 base */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ static const unsigned short lext[31] = { /* Length codes 257..285 extra */
+ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
+ 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 73, 195};
+ static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577, 0, 0};
+ static const unsigned short dext[32] = { /* Distance codes 0..29 extra */
+ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
+ 28, 28, 29, 29, 64, 64};
+
+ /*
+ Process a set of code lengths to create a canonical Huffman code. The
+ code lengths are lens[0..codes-1]. Each length corresponds to the
+ symbols 0..codes-1. The Huffman code is generated by first sorting the
+ symbols by length from short to long, and retaining the symbol order
+ for codes with equal lengths. Then the code starts with all zero bits
+ for the first code of the shortest length, and the codes are integer
+ increments for the same length, and zeros are appended as the length
+ increases. For the deflate format, these bits are stored backwards
+ from their more natural integer increment ordering, and so when the
+ decoding tables are built in the large loop below, the integer codes
+ are incremented backwards.
+
+ This routine assumes, but does not check, that all of the entries in
+ lens[] are in the range 0..MAXBITS. The caller must assure this.
+ A length of 1..MAXBITS is interpreted as that code length; zero means
+ that the symbol does not occur in this code.
+
+ The codes are sorted by computing a count of codes for each length,
+ creating from that a table of starting indices for each length in the
+ sorted table, and then entering the symbols in order in the sorted
+ table. The sorted table is work[], with that space being provided by
+ the caller.
+
+ The length counts are used for other purposes as well, i.e. finding
+ the minimum and maximum length codes, determining if there are any
+ codes at all, checking for a valid set of lengths, and looking ahead
+ at length counts to determine sub-table sizes when building the
+ decoding tables.
+ */
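+
+ /*
+    A small worked example (the one in the deflate specification, RFC
+    1951): for eight symbols A..H with lengths {3,3,3,3,3,2,4,4}, sorting
+    by length and then symbol order gives F, A..E, G, H, and the canonical
+    codes are F:00, A:010, B:011, C:100, D:101, E:110, G:1110, H:1111 --
+    before the bit reversal described above.
+  */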
+
+ /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
+ for (len = 0; len <= MAXBITS; len++)
+ count[len] = 0;
+ for (sym = 0; sym < codes; sym++)
+ count[lens[sym]]++;
+
+ /* bound code lengths, force root to be within code lengths */
+ root = *bits;
+ for (max = MAXBITS; max >= 1; max--)
+ if (count[max] != 0) break;
+ if (root > max) root = max;
+ if (max == 0) { /* no symbols to code at all */
+ here.op = (unsigned char)64; /* invalid code marker */
+ here.bits = (unsigned char)1;
+ here.val = (unsigned short)0;
+ *(*table)++ = here; /* make a table to force an error */
+ *(*table)++ = here;
+ *bits = 1;
+ return 0; /* no symbols, but wait for decoding to report error */
+ }
+ for (min = 1; min < max; min++)
+ if (count[min] != 0) break;
+ if (root < min) root = min;
+
+ /* check for an over-subscribed or incomplete set of lengths */
+ left = 1;
+ for (len = 1; len <= MAXBITS; len++) {
+ left <<= 1;
+ left -= count[len];
+ if (left < 0) return -1; /* over-subscribed */
+ }
+ if (left > 0 && (type == CODES || max != 1))
+ return -1; /* incomplete set */
+
+ /* generate offsets into symbol table for each length for sorting */
+ offs[1] = 0;
+ for (len = 1; len < MAXBITS; len++)
+ offs[len + 1] = offs[len] + count[len];
+
+ /* sort symbols by length, by symbol order within each length */
+ for (sym = 0; sym < codes; sym++)
+ if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;
+
+ /*
+ Create and fill in decoding tables. In this loop, the table being
+ filled is at next and has curr index bits. The code being used is huff
+ with length len. That code is converted to an index by dropping drop
+ bits off of the bottom. For codes where len is less than drop + curr,
+ those top drop + curr - len bits are incremented through all values to
+ fill the table with replicated entries.
+
+ root is the number of index bits for the root table. When len exceeds
+ root, sub-tables are created pointed to by the root entry with an index
+ of the low root bits of huff. This is saved in low to check for when a
+ new sub-table should be started. drop is zero when the root table is
+ being filled, and drop is root when sub-tables are being filled.
+
+ When a new sub-table is needed, it is necessary to look ahead in the
+ code lengths to determine what size sub-table is needed. The length
+ counts are used for this, and so count[] is decremented as codes are
+ entered in the tables.
+
+ used keeps track of how many table entries have been allocated from the
+ provided *table space. It is checked for LENS and DISTS tables against
+ the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in
+ the initial root table size constants. See the comments in inftrees.h
+ for more information.
+
+ sym increments through all symbols, and the loop terminates when
+ all codes of length max, i.e. all codes, have been processed. This
+ routine permits incomplete codes, so another loop after this one fills
+ in the rest of the decoding tables with invalid code markers.
+ */
+
+ /* set up for code type */
+ switch (type) {
+ case CODES:
+ base = extra = work; /* dummy value--not used */
+ end = 19;
+ break;
+ case LENS:
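+ /* length symbols run 257..285, so offset the base and extra pointers
+    to let base[work[sym]] and extra[work[sym]] be indexed directly by
+    the symbol value */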
+ base = lbase;
+ base -= 257;
+ extra = lext;
+ extra -= 257;
+ end = 256;
+ break;
+ default: /* DISTS */
+ base = dbase;
+ extra = dext;
+ end = -1;
+ }
+
+ /* initialize state for loop */
+ huff = 0; /* starting code */
+ sym = 0; /* starting code symbol */
+ len = min; /* starting code length */
+ next = *table; /* current table to fill in */
+ curr = root; /* current table index bits */
+ drop = 0; /* current bits to drop from code for index */
+ low = (unsigned)(-1); /* trigger new sub-table when len > root */
+ used = 1U << root; /* use root table entries */
+ mask = used - 1; /* mask for comparing low */
+
+ /* check available table space */
+ if ((type == LENS && used >= ENOUGH_LENS) ||
+ (type == DISTS && used >= ENOUGH_DISTS))
+ return 1;
+
+ /* process all codes and make table entries */
+ for (;;) {
+ /* create table entry */
+ here.bits = (unsigned char)(len - drop);
+ if ((int)(work[sym]) < end) {
+ here.op = (unsigned char)0;
+ here.val = work[sym];
+ }
+ else if ((int)(work[sym]) > end) {
+ here.op = (unsigned char)(extra[work[sym]]);
+ here.val = base[work[sym]];
+ }
+ else {
+ here.op = (unsigned char)(32 + 64); /* end of block */
+ here.val = 0;
+ }
+
+ /* replicate for those indices with low len bits equal to huff */
+ incr = 1U << (len - drop);
+ fill = 1U << curr;
+ min = fill; /* save offset to next table */
+ do {
+ fill -= incr;
+ next[(huff >> drop) + fill] = here;
+ } while (fill != 0);
+
+ /* backwards increment the len-bit code huff */
+ incr = 1U << (len - 1);
+ while (huff & incr)
+ incr >>= 1;
+ if (incr != 0) {
+ huff &= incr - 1;
+ huff += incr;
+ }
+ else
+ huff = 0;
+
+ /* go to next symbol, update count, len */
+ sym++;
+ if (--(count[len]) == 0) {
+ if (len == max) break;
+ len = lens[work[sym]];
+ }
+
+ /* create new sub-table if needed */
+ if (len > root && (huff & mask) != low) {
+ /* if first time, transition to sub-tables */
+ if (drop == 0)
+ drop = root;
+
+ /* increment past last table */
+ next += min; /* here min is 1 << curr */
+
+ /* determine length of next table */
+ curr = len - drop;
+ left = (int)(1 << curr);
+ while (curr + drop < max) {
+ left -= count[curr + drop];
+ if (left <= 0) break;
+ curr++;
+ left <<= 1;
+ }
+
+ /* check for enough space */
+ used += 1U << curr;
+ if ((type == LENS && used >= ENOUGH_LENS) ||
+ (type == DISTS && used >= ENOUGH_DISTS))
+ return 1;
+
+ /* point entry in root table to sub-table */
+ low = huff & mask;
+ (*table)[low].op = (unsigned char)curr;
+ (*table)[low].bits = (unsigned char)root;
+ (*table)[low].val = (unsigned short)(next - *table);
+ }
+ }
+
+ /*
+ Fill in rest of table for incomplete codes. This loop is similar to the
+ loop above in incrementing huff for table indices. It is assumed that
+ len is equal to curr + drop, so there is no loop needed to increment
+ through high index bits. When the current sub-table is filled, the loop
+ drops back to the root table to fill in any remaining entries there.
+ */
+ here.op = (unsigned char)64; /* invalid code marker */
+ here.bits = (unsigned char)(len - drop);
+ here.val = (unsigned short)0;
+ while (huff != 0) {
+ /* when done with sub-table, drop back to root table */
+ if (drop != 0 && (huff & mask) != low) {
+ drop = 0;
+ len = root;
+ next = *table;
+ here.bits = (unsigned char)len;
+ }
+
+ /* put invalid code marker in table */
+ next[huff >> drop] = here;
+
+ /* backwards increment the len-bit code huff */
+ incr = 1U << (len - 1);
+ while (huff & incr)
+ incr >>= 1;
+ if (incr != 0) {
+ huff &= incr - 1;
+ huff += incr;
+ }
+ else
+ huff = 0;
+ }
+
+ /* set return parameters */
+ *table += used;
+ *bits = root;
+ return 0;
+}
--- /dev/null
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995-2005, 2010 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Structure for decoding tables. Each entry provides either the
+ information needed to do the operation requested by the code that
+ indexed that table entry, or it provides a pointer to another
+ table that indexes more bits of the code. op indicates whether
+ the entry is a pointer to another table, a literal, a length or
+ distance, an end-of-block, or an invalid code. For a table
+ pointer, the low four bits of op is the number of index bits of
+ that table. For a length or distance, the low four bits of op
+ is the number of extra bits to get after the code. bits is
+ the number of bits in this code or part of the code to drop off
+ of the bit buffer. val is the actual byte to output in the case
+ of a literal, the base length or distance, or the offset from
+ the current table to the next table. Each entry is four bytes. */
+typedef struct {
+ unsigned char op; /* operation, extra bits, table bits */
+ unsigned char bits; /* bits in this part of the code */
+ unsigned short val; /* offset in table or code value */
+} code;
+
+/* op values as set by inflate_table():
+ 00000000 - literal
+ 0000tttt - table link, tttt != 0 is the number of table index bits
+ 0001eeee - length or distance, eeee is the number of extra bits
+ 01100000 - end of block
+ 01000000 - invalid code
+ */
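+
+/* For illustration only -- a hedged sketch of how these op values drive a
+   decode step (compare the LEN and DIST states in inflate.c):
+
+      here = table[accumulator & ((1U << curbits) - 1)];
+      if (here.op == 0) output the literal here.val;
+      else if (here.op & 16) base value here.val, then read
+          (here.op & 15) extra bits;
+      else if ((here.op & 64) == 0) continue at the sub-table starting
+          here.val entries into the current table;
+      else if (here.op & 32) end of block;
+      else invalid code;
+ */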
+
+/* Maximum size of the dynamic table. The maximum number of code structures is
+ 1444, which is the sum of 852 for literal/length codes and 592 for distance
+ codes. These values were found by exhaustive searches using the program
+ examples/enough.c found in the zlib distribution. The arguments to that
+ program are the number of symbols, the initial root table size, and the
+ maximum bit length of a code. "enough 286 9 15" for literal/length codes
+ returns 852, and "enough 30 6 15" for distance codes returns 592. The
+ initial root table size (9 or 6) is found in the fifth argument of the
+ inflate_table() calls in inflate.c and infback.c. If the root table size
+ is changed, then these maximum sizes would need to be recalculated and
+ updated. */
+#define ENOUGH_LENS 852
+#define ENOUGH_DISTS 592
+#define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS)
+
+/* Type of code to build for inflate_table() */
+typedef enum {
+ CODES,
+ LENS,
+ DISTS
+} codetype;
+
+int ZLIB_INTERNAL inflate_table OF((codetype type, unsigned short FAR *lens,
+ unsigned codes, code FAR * FAR *table,
+ unsigned FAR *bits, unsigned short FAR *work));
--- /dev/null
+/* trees.c -- output deflated data using Huffman coding
+ * Copyright (C) 1995-2010 Jean-loup Gailly
+ * detect_data_type() function provided freely by Cosmin Truta, 2006
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process uses several Huffman trees. The more
+ * common source values are represented by shorter bit sequences.
+ *
+ * Each code tree is stored in a compressed form which is itself
+ * a Huffman encoding of the lengths of all the code strings (in
+ * ascending order by source values). The actual code strings are
+ * reconstructed from the lengths in the inflate process, as described
+ * in the deflate specification.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P., "'Deflate' Compressed Data Format Specification".
+ * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
+ *
+ * Storer, James A.
+ * Data Compression: Methods and Theory, pp. 49-50.
+ * Computer Science Press, 1988. ISBN 0-7167-8156-5.
+ *
+ * Sedgewick, R.
+ * Algorithms, p290.
+ * Addison-Wesley, 1983. ISBN 0-201-06672-6.
+ */
+
+/* #define GEN_TREES_H */
+
+#include "deflate.h"
+
+#ifdef DEBUG
+# include <ctype.h>
+#endif
+
+/* ===========================================================================
+ * Constants
+ */
+
+#define MAX_BL_BITS 7
+/* Bit length codes must not exceed MAX_BL_BITS bits */
+
+#define END_BLOCK 256
+/* end of block literal code */
+
+#define REP_3_6 16
+/* repeat previous bit length 3-6 times (2 bits of repeat count) */
+
+#define REPZ_3_10 17
+/* repeat a zero length 3-10 times (3 bits of repeat count) */
+
+#define REPZ_11_138 18
+/* repeat a zero length 11-138 times (7 bits of repeat count) */
+
+local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
+ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
+
+local const int extra_dbits[D_CODES] /* extra bits for each distance code */
+ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
+ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
+
+local const uch bl_order[BL_CODES]
+ = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
+/* The lengths of the bit length codes are sent in order of decreasing
+ * probability, to avoid transmitting the lengths for unused bit length codes.
+ */
+
+#define Buf_size (8 * 2*sizeof(char))
+/* Number of bits used within bi_buf. (bi_buf might be implemented on
+ * more than 16 bits on some systems.)
+ */
+
+/* ===========================================================================
+ * Local data. These are initialized only once.
+ */
+
+#define DIST_CODE_LEN 512 /* see definition of array dist_code below */
+
+#if defined(GEN_TREES_H) || !defined(STDC)
+/* non ANSI compilers may not accept trees.h */
+
+local ct_data static_ltree[L_CODES+2];
+/* The static literal tree. Since the bit lengths are imposed, there is no
+ * need for the L_CODES extra codes used during heap construction. However,
+ * the codes 286 and 287 are needed to build a canonical tree (see _tr_init
+ * below).
+ */
+
+local ct_data static_dtree[D_CODES];
+/* The static distance tree. (Actually a trivial tree since all codes use
+ * 5 bits.)
+ */
+
+uch _dist_code[DIST_CODE_LEN];
+/* Distance codes. The first 256 values correspond to the distances
+ * 3 .. 258, the last 256 values correspond to the top 8 bits of
+ * the 15 bit distances.
+ */
+
+uch _length_code[MAX_MATCH-MIN_MATCH+1];
+/* length code for each normalized match length (0 == MIN_MATCH) */
+
+local int base_length[LENGTH_CODES];
+/* First normalized length for each code (0 = MIN_MATCH) */
+
+local int base_dist[D_CODES];
+/* First normalized distance for each code (0 = distance of 1) */
+
+#else
+# include "trees.h"
+#endif /* GEN_TREES_H */
+
+struct static_tree_desc_s {
+ const ct_data *static_tree; /* static tree or NULL */
+ const intf *extra_bits; /* extra bits for each code or NULL */
+ int extra_base; /* base index for extra_bits */
+ int elems; /* max number of elements in the tree */
+ int max_length; /* max bit length for the codes */
+};
+
+local static_tree_desc static_l_desc =
+{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
+
+local static_tree_desc static_d_desc =
+{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
+
+local static_tree_desc static_bl_desc =
+{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
+
+/* ===========================================================================
+ * Local (static) routines in this file.
+ */
+
+local void tr_static_init OF((void));
+local void init_block OF((deflate_state *s));
+local void pqdownheap OF((deflate_state *s, ct_data *tree, int k));
+local void gen_bitlen OF((deflate_state *s, tree_desc *desc));
+local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count));
+local void build_tree OF((deflate_state *s, tree_desc *desc));
+local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local int build_bl_tree OF((deflate_state *s));
+local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
+ int blcodes));
+local void compress_block OF((deflate_state *s, ct_data *ltree,
+ ct_data *dtree));
+local int detect_data_type OF((deflate_state *s));
+local unsigned bi_reverse OF((unsigned value, int length));
+local void bi_windup OF((deflate_state *s));
+local void bi_flush OF((deflate_state *s));
+local void copy_block OF((deflate_state *s, charf *buf, unsigned len,
+ int header));
+
+#ifdef GEN_TREES_H
+local void gen_trees_header OF((void));
+#endif
+
+#ifndef DEBUG
+# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
+ /* Send a code of the given tree. c and tree must not have side effects */
+
+#else /* DEBUG */
+# define send_code(s, c, tree) \
+ { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
+ send_bits(s, tree[c].Code, tree[c].Len); }
+#endif
+
+/* ===========================================================================
+ * Output a short LSB first on the stream.
+ * IN assertion: there is enough room in pendingBuf.
+ */
+#define put_short(s, w) { \
+ put_byte(s, (uch)((w) & 0xff)); \
+ put_byte(s, (uch)((ush)(w) >> 8)); \
+}
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+#ifdef DEBUG
+local void send_bits OF((deflate_state *s, int value, int length));
+
+local void send_bits(s, value, length)
+ deflate_state *s;
+ int value; /* value to send */
+ int length; /* number of bits */
+{
+ Tracevv((stderr," l %2d v %4x ", length, value));
+ Assert(length > 0 && length <= 15, "invalid length");
+ s->bits_sent += (ulg)length;
+
+ /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+ * (16 - bi_valid) bits from value, leaving (length - (16-bi_valid))
+ * unused bits in value.
+ */
+ if (s->bi_valid > (int)Buf_size - length) {
+ s->bi_buf |= (ush)value << s->bi_valid;
+ put_short(s, s->bi_buf);
+ s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+ s->bi_valid += length - Buf_size;
+ } else {
+ s->bi_buf |= (ush)value << s->bi_valid;
+ s->bi_valid += length;
+ }
+}
+#else /* !DEBUG */
+
+#define send_bits(s, value, length) \
+{ int len = length;\
+ if (s->bi_valid > (int)Buf_size - len) {\
+ int val = value;\
+ s->bi_buf |= (ush)val << s->bi_valid;\
+ put_short(s, s->bi_buf);\
+ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
+ s->bi_valid += len - Buf_size;\
+ } else {\
+ s->bi_buf |= (ush)(value) << s->bi_valid;\
+ s->bi_valid += len;\
+ }\
+}
+#endif /* DEBUG */
+
+
+/* the arguments must not have side effects */
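+
+/* Worked example (illustrative only): with Buf_size == 16, suppose
+ * bi_valid == 14 and send_bits(s, value, 3) is called. Since 14 > 16 - 3,
+ * the low 2 bits of value top off bi_buf, the filled 16-bit buffer is
+ * written with put_short, and the remaining high bit of value becomes the
+ * new bi_buf with bi_valid = 14 + 3 - 16 = 1.
+ */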
+
+/* ===========================================================================
+ * Initialize the various 'constant' tables.
+ */
+local void tr_static_init()
+{
+#if defined(GEN_TREES_H) || !defined(STDC)
+ static int static_init_done = 0;
+ int n; /* iterates over tree elements */
+ int bits; /* bit counter */
+ int length; /* length value */
+ int code; /* code value */
+ int dist; /* distance index */
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ if (static_init_done) return;
+
+ /* For some embedded targets, global variables are not initialized: */
+#ifdef NO_INIT_GLOBAL_POINTERS
+ static_l_desc.static_tree = static_ltree;
+ static_l_desc.extra_bits = extra_lbits;
+ static_d_desc.static_tree = static_dtree;
+ static_d_desc.extra_bits = extra_dbits;
+ static_bl_desc.extra_bits = extra_blbits;
+#endif
+
+ /* Initialize the mapping length (0..255) -> length code (0..28) */
+ length = 0;
+ for (code = 0; code < LENGTH_CODES-1; code++) {
+ base_length[code] = length;
+ for (n = 0; n < (1<<extra_lbits[code]); n++) {
+ _length_code[length++] = (uch)code;
+ }
+ }
+ Assert (length == 256, "tr_static_init: length != 256");
+ /* Note that the length 255 (match length 258) can be represented
+ * in two different ways: code 284 + 5 bits or code 285, so we
+ * overwrite length_code[255] to use the best encoding:
+ */
+ _length_code[length-1] = (uch)code;
+
+ /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
+ dist = 0;
+ for (code = 0 ; code < 16; code++) {
+ base_dist[code] = dist;
+ for (n = 0; n < (1<<extra_dbits[code]); n++) {
+ _dist_code[dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: dist != 256");
+ dist >>= 7; /* from now on, all distances are divided by 128 */
+ for ( ; code < D_CODES; code++) {
+ base_dist[code] = dist << 7;
+ for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
+ _dist_code[256 + dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: 256+dist != 512");
+
+ /* Construct the codes of the static literal tree */
+ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
+ n = 0;
+ while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
+ while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
+ while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
+ while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
+ /* Codes 286 and 287 do not exist, but we must include them in the
+ * tree construction to get a canonical Huffman tree (longest code
+ * all ones)
+ */
+ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
+
+ /* The static distance tree is trivial: */
+ for (n = 0; n < D_CODES; n++) {
+ static_dtree[n].Len = 5;
+ static_dtree[n].Code = bi_reverse((unsigned)n, 5);
+ }
+ static_init_done = 1;
+
+# ifdef GEN_TREES_H
+ gen_trees_header();
+# endif
+#endif /* defined(GEN_TREES_H) || !defined(STDC) */
+}
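+
+/* Worked example (illustrative, values taken from the generated trees.h
+ * tables below): a match length of 13 is normalized to 13 - MIN_MATCH = 10
+ * and _length_code[10] == 9, selecting length symbol 9 + LITERALS + 1 in
+ * the literal/length tree; a match distance of 7 looks up
+ * _dist_code[7 - 1] == 5 in the first half of _dist_code.
+ */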
+
+/* ===========================================================================
+ * Generate the file trees.h describing the static trees.
+ */
+#ifdef GEN_TREES_H
+# ifndef DEBUG
+# include <stdio.h>
+# endif
+
+# define SEPARATOR(i, last, width) \
+ ((i) == (last)? "\n};\n\n" : \
+ ((i) % (width) == (width)-1 ? ",\n" : ", "))
+
+void gen_trees_header()
+{
+ FILE *header = fopen("trees.h", "w");
+ int i;
+
+ Assert (header != NULL, "Can't open trees.h");
+ fprintf(header,
+ "/* header created automatically with -DGEN_TREES_H */\n\n");
+
+ fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n");
+ for (i = 0; i < L_CODES+2; i++) {
+ fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code,
+ static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5));
+ }
+
+ fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n");
+ for (i = 0; i < D_CODES; i++) {
+ fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code,
+ static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5));
+ }
+
+ fprintf(header, "const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {\n");
+ for (i = 0; i < DIST_CODE_LEN; i++) {
+ fprintf(header, "%2u%s", _dist_code[i],
+ SEPARATOR(i, DIST_CODE_LEN-1, 20));
+ }
+
+ fprintf(header,
+ "const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {\n");
+ for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) {
+ fprintf(header, "%2u%s", _length_code[i],
+ SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20));
+ }
+
+ fprintf(header, "local const int base_length[LENGTH_CODES] = {\n");
+ for (i = 0; i < LENGTH_CODES; i++) {
+ fprintf(header, "%1u%s", base_length[i],
+ SEPARATOR(i, LENGTH_CODES-1, 20));
+ }
+
+ fprintf(header, "local const int base_dist[D_CODES] = {\n");
+ for (i = 0; i < D_CODES; i++) {
+ fprintf(header, "%5u%s", base_dist[i],
+ SEPARATOR(i, D_CODES-1, 10));
+ }
+
+ fclose(header);
+}
+#endif /* GEN_TREES_H */
+
+/* ===========================================================================
+ * Initialize the tree data structures for a new zlib stream.
+ */
+void ZLIB_INTERNAL _tr_init(s)
+ deflate_state *s;
+{
+ tr_static_init();
+
+ s->l_desc.dyn_tree = s->dyn_ltree;
+ s->l_desc.stat_desc = &static_l_desc;
+
+ s->d_desc.dyn_tree = s->dyn_dtree;
+ s->d_desc.stat_desc = &static_d_desc;
+
+ s->bl_desc.dyn_tree = s->bl_tree;
+ s->bl_desc.stat_desc = &static_bl_desc;
+
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+#ifdef DEBUG
+ s->compressed_len = 0L;
+ s->bits_sent = 0L;
+#endif
+
+ /* Initialize the first block of the first file: */
+ init_block(s);
+}
+
+/* ===========================================================================
+ * Initialize a new block.
+ */
+local void init_block(s)
+ deflate_state *s;
+{
+ int n; /* iterates over tree elements */
+
+ /* Initialize the trees. */
+ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
+ for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
+ for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
+
+ s->dyn_ltree[END_BLOCK].Freq = 1;
+ s->opt_len = s->static_len = 0L;
+ s->last_lit = s->matches = 0;
+}
+
+#define SMALLEST 1
+/* Index within the heap array of the least frequent node in the Huffman tree */
+
+
+/* ===========================================================================
+ * Remove the smallest element from the heap and recreate the heap with
+ * one less element. Updates heap and heap_len.
+ */
+#define pqremove(s, tree, top) \
+{\
+ top = s->heap[SMALLEST]; \
+ s->heap[SMALLEST] = s->heap[s->heap_len--]; \
+ pqdownheap(s, tree, SMALLEST); \
+}
+
+/* ===========================================================================
+ * Compare two subtrees, using the tree depth as tie breaker when
+ * the subtrees have equal frequency. This minimizes the worst case length.
+ */
+#define smaller(tree, n, m, depth) \
+ (tree[n].Freq < tree[m].Freq || \
+ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
+
+/* ===========================================================================
+ * Restore the heap property by moving down the tree starting at node k,
+ * exchanging a node with the smallest of its two sons if necessary, stopping
+ * when the heap property is re-established (each father smaller than its
+ * two sons).
+ */
+local void pqdownheap(s, tree, k)
+ deflate_state *s;
+ ct_data *tree; /* the tree to restore */
+ int k; /* node to move down */
+{
+ int v = s->heap[k];
+ int j = k << 1; /* left son of k */
+ while (j <= s->heap_len) {
+ /* Set j to the smallest of the two sons: */
+ if (j < s->heap_len &&
+ smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
+ j++;
+ }
+ /* Exit if v is smaller than both sons */
+ if (smaller(tree, v, s->heap[j], s->depth)) break;
+
+ /* Exchange v with the smallest son */
+ s->heap[k] = s->heap[j]; k = j;
+
+ /* And continue down the tree, setting j to the left son of k */
+ j <<= 1;
+ }
+ s->heap[k] = v;
+}
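+
+/* Worked example (illustrative): with heap_len == 3 and frequencies 9, 4, 7
+ * in heap[1..3], pqdownheap(s, tree, SMALLEST) selects the smaller son
+ * heap[2] (freq 4), exchanges it with the root, and stops because the old
+ * root (freq 9) has no sons at index 4; one exchange restores the heap.
+ */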
+
+/* ===========================================================================
+ * Compute the optimal bit lengths for a tree and update the total bit length
+ * for the current block.
+ * IN assertion: the fields freq and dad are set, heap[heap_max] and
+ * above are the tree nodes sorted by increasing frequency.
+ * OUT assertions: the field len is set to the optimal bit length, the
+ * array bl_count contains the frequencies for each bit length.
+ * The length opt_len is updated; static_len is also updated if stree is
+ * not null.
+ */
+local void gen_bitlen(s, desc)
+ deflate_state *s;
+ tree_desc *desc; /* the tree descriptor */
+{
+ ct_data *tree = desc->dyn_tree;
+ int max_code = desc->max_code;
+ const ct_data *stree = desc->stat_desc->static_tree;
+ const intf *extra = desc->stat_desc->extra_bits;
+ int base = desc->stat_desc->extra_base;
+ int max_length = desc->stat_desc->max_length;
+ int h; /* heap index */
+ int n, m; /* iterate over the tree elements */
+ int bits; /* bit length */
+ int xbits; /* extra bits */
+ ush f; /* frequency */
+ int overflow = 0; /* number of elements with bit length too large */
+
+ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
+
+ /* In a first pass, compute the optimal bit lengths (which may
+ * overflow in the case of the bit length tree).
+ */
+ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
+
+ for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+ n = s->heap[h];
+ bits = tree[tree[n].Dad].Len + 1;
+ if (bits > max_length) bits = max_length, overflow++;
+ tree[n].Len = (ush)bits;
+ /* We overwrite tree[n].Dad which is no longer needed */
+
+ if (n > max_code) continue; /* not a leaf node */
+
+ s->bl_count[bits]++;
+ xbits = 0;
+ if (n >= base) xbits = extra[n-base];
+ f = tree[n].Freq;
+ s->opt_len += (ulg)f * (bits + xbits);
+ if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
+ }
+ if (overflow == 0) return;
+
+ Trace((stderr,"\nbit length overflow\n"));
+ /* This happens for example on obj2 and pic of the Calgary corpus */
+
+ /* Find the first bit length which could increase: */
+ do {
+ bits = max_length-1;
+ while (s->bl_count[bits] == 0) bits--;
+ s->bl_count[bits]--; /* move one leaf down the tree */
+ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+ s->bl_count[max_length]--;
+ /* The brother of the overflow item also moves one step up,
+ * but this does not affect bl_count[max_length]
+ */
+ overflow -= 2;
+ } while (overflow > 0);
+
+ /* Now recompute all bit lengths, scanning in increasing frequency.
+ * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+ * lengths instead of fixing only the wrong ones. This idea is taken
+ * from 'ar' written by Haruhiko Okumura.)
+ */
+ for (bits = max_length; bits != 0; bits--) {
+ n = s->bl_count[bits];
+ while (n != 0) {
+ m = s->heap[--h];
+ if (m > max_code) continue;
+ if ((unsigned) tree[m].Len != (unsigned) bits) {
+ Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+ s->opt_len += ((long)bits - (long)tree[m].Len)
+ *(long)tree[m].Freq;
+ tree[m].Len = (ush)bits;
+ }
+ n--;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ * zero code length.
+ */
+local void gen_codes (tree, max_code, bl_count)
+ ct_data *tree; /* the tree to decorate */
+ int max_code; /* largest code with non zero frequency */
+ ushf *bl_count; /* number of codes at each bit length */
+{
+ ush next_code[MAX_BITS+1]; /* next code value for each bit length */
+ ush code = 0; /* running code value */
+ int bits; /* bit index */
+ int n; /* code index */
+
+ /* The distribution counts are first used to generate the code values
+ * without bit reversal.
+ */
+ for (bits = 1; bits <= MAX_BITS; bits++) {
+ next_code[bits] = code = (code + bl_count[bits-1]) << 1;
+ }
+ /* Check that the bit counts in bl_count are consistent. The last code
+ * must be all ones.
+ */
+ Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+ "inconsistent bit counts");
+ Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+ for (n = 0; n <= max_code; n++) {
+ int len = tree[n].Len;
+ if (len == 0) continue;
+ /* Now reverse the bits */
+ tree[n].Code = bi_reverse(next_code[len]++, len);
+
+ Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
+ n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
+ }
+}
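+
+/* Worked example (illustrative, the classic case from RFC 1951): for code
+ * lengths {3,3,3,3,3,2,4,4}, bl_count is {0,0,1,5,2} and therefore
+ * next_code[2] = 0, next_code[3] = 2, next_code[4] = 14. The length-2
+ * symbol gets code 00, the length-3 symbols get 010..110, and the length-4
+ * symbols get 1110 and 1111; bi_reverse then stores each code LSB first to
+ * match the deflate bit order used by send_bits.
+ */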
+
+/* ===========================================================================
+ * Construct one Huffman tree and assign the code bit strings and lengths.
+ * Update the total bit length for the current block.
+ * IN assertion: the field freq is set for all tree elements.
+ * OUT assertions: the fields len and code are set to the optimal bit length
+ * and corresponding code. The length opt_len is updated; static_len is
+ * also updated if stree is not null. The field max_code is set.
+ */
+local void build_tree(s, desc)
+ deflate_state *s;
+ tree_desc *desc; /* the tree descriptor */
+{
+ ct_data *tree = desc->dyn_tree;
+ const ct_data *stree = desc->stat_desc->static_tree;
+ int elems = desc->stat_desc->elems;
+ int n, m; /* iterate over heap elements */
+ int max_code = -1; /* largest code with non zero frequency */
+ int node; /* new node being created */
+
+ /* Construct the initial heap, with least frequent element in
+ * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ * heap[0] is not used.
+ */
+ s->heap_len = 0, s->heap_max = HEAP_SIZE;
+
+ for (n = 0; n < elems; n++) {
+ if (tree[n].Freq != 0) {
+ s->heap[++(s->heap_len)] = max_code = n;
+ s->depth[n] = 0;
+ } else {
+ tree[n].Len = 0;
+ }
+ }
+
+ /* The pkzip format requires that at least one distance code exists,
+ * and that at least one bit should be sent even if there is only one
+ * possible code. So to avoid special checks later on we force at least
+ * two codes of non zero frequency.
+ */
+ while (s->heap_len < 2) {
+ node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
+ tree[node].Freq = 1;
+ s->depth[node] = 0;
+ s->opt_len--; if (stree) s->static_len -= stree[node].Len;
+ /* node is 0 or 1 so it does not have extra bits */
+ }
+ desc->max_code = max_code;
+
+ /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ * establish sub-heaps of increasing lengths:
+ */
+ for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
+
+ /* Construct the Huffman tree by repeatedly combining the least two
+ * frequent nodes.
+ */
+ node = elems; /* next internal node of the tree */
+ do {
+ pqremove(s, tree, n); /* n = node of least frequency */
+ m = s->heap[SMALLEST]; /* m = node of next least frequency */
+
+ s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
+ s->heap[--(s->heap_max)] = m;
+
+ /* Create a new node father of n and m */
+ tree[node].Freq = tree[n].Freq + tree[m].Freq;
+ s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ?
+ s->depth[n] : s->depth[m]) + 1);
+ tree[n].Dad = tree[m].Dad = (ush)node;
+#ifdef DUMP_BL_TREE
+ if (tree == s->bl_tree) {
+ fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
+ node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
+ }
+#endif
+ /* and insert the new node in the heap */
+ s->heap[SMALLEST] = node++;
+ pqdownheap(s, tree, SMALLEST);
+
+ } while (s->heap_len >= 2);
+
+ s->heap[--(s->heap_max)] = s->heap[SMALLEST];
+
+ /* At this point, the fields freq and dad are set. We can now
+ * generate the bit lengths.
+ */
+ gen_bitlen(s, (tree_desc *)desc);
+
+ /* The field len is now set, we can generate the bit codes */
+ gen_codes ((ct_data *)tree, max_code, s->bl_count);
+}
+
+/* ===========================================================================
+ * Scan a literal or distance tree to determine the frequencies of the codes
+ * in the bit length tree.
+ */
+local void scan_tree (s, tree, max_code)
+ deflate_state *s;
+ ct_data *tree; /* the tree to be scanned */
+ int max_code; /* and its largest code of non zero frequency */
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ if (nextlen == 0) max_count = 138, min_count = 3;
+ tree[max_code+1].Len = (ush)0xffff; /* guard */
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ s->bl_tree[curlen].Freq += count;
+ } else if (curlen != 0) {
+ if (curlen != prevlen) s->bl_tree[curlen].Freq++;
+ s->bl_tree[REP_3_6].Freq++;
+ } else if (count <= 10) {
+ s->bl_tree[REPZ_3_10].Freq++;
+ } else {
+ s->bl_tree[REPZ_11_138].Freq++;
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
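+
+/* Worked example (illustrative): the length sequence 8,8,8,8,8,0,0,0,0 is
+ * tallied as one literal length 8, one REP_3_6 (previous length repeated 4
+ * times) and one REPZ_3_10 (zero repeated 4 times); send_tree below emits
+ * exactly these codes, with 2 and 3 extra bits for the repeat counts.
+ */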
+
+/* ===========================================================================
+ * Send a literal or distance tree in compressed form, using the codes in
+ * bl_tree.
+ */
+local void send_tree (s, tree, max_code)
+ deflate_state *s;
+ ct_data *tree; /* the tree to be scanned */
+ int max_code; /* and its largest code of non zero frequency */
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ /* tree[max_code+1].Len = -1; */ /* guard already set */
+ if (nextlen == 0) max_count = 138, min_count = 3;
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
+
+ } else if (curlen != 0) {
+ if (curlen != prevlen) {
+ send_code(s, curlen, s->bl_tree); count--;
+ }
+ Assert(count >= 3 && count <= 6, " 3_6?");
+ send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
+
+ } else if (count <= 10) {
+ send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
+
+ } else {
+ send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Construct the Huffman tree for the bit lengths and return the index in
+ * bl_order of the last bit length code to send.
+ */
+local int build_bl_tree(s)
+ deflate_state *s;
+{
+ int max_blindex; /* index of last bit length code of non zero freq */
+
+ /* Determine the bit length frequencies for literal and distance trees */
+ scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
+ scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
+
+ /* Build the bit length tree: */
+ build_tree(s, (tree_desc *)(&(s->bl_desc)));
+ /* opt_len now includes the length of the tree representations, except
+ * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+ */
+
+ /* Determine the number of bit length codes to send. The pkzip format
+ * requires that at least 4 bit length codes be sent. (appnote.txt says
+ * 3 but the actual value used is 4.)
+ */
+ for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
+ if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
+ }
+ /* Update opt_len to include the bit length tree and counts */
+ s->opt_len += 3*(max_blindex+1) + 5+5+4;
+ Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
+ s->opt_len, s->static_len));
+
+ return max_blindex;
+}
+
+/* ===========================================================================
+ * Send the header for a block using dynamic Huffman trees: the counts, the
+ * lengths of the bit length codes, the literal tree and the distance tree.
+ * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ */
+local void send_all_trees(s, lcodes, dcodes, blcodes)
+ deflate_state *s;
+ int lcodes, dcodes, blcodes; /* number of codes for each tree */
+{
+ int rank; /* index in bl_order */
+
+ Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
+ Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
+ "too many codes");
+ Tracev((stderr, "\nbl counts: "));
+ send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
+ send_bits(s, dcodes-1, 5);
+ send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
+ for (rank = 0; rank < blcodes; rank++) {
+ Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
+ send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
+ }
+ Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
+ Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
+ Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
+}
+
+/* ===========================================================================
+ * Send a stored block
+ */
+void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last)
+ deflate_state *s;
+ charf *buf; /* input block */
+ ulg stored_len; /* length of input block */
+ int last; /* one if this is the last block for a file */
+{
+ send_bits(s, (STORED_BLOCK<<1)+last, 3); /* send block type */
+#ifdef DEBUG
+ s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
+ s->compressed_len += (stored_len + 4) << 3;
+#endif
+ copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
+}
+
+/* ===========================================================================
+ * Send one empty static block to give enough lookahead for inflate.
+ * This takes 10 bits, of which 7 may remain in the bit buffer.
+ * The current inflate code requires 9 bits of lookahead. If the
+ * last two codes for the previous block (real code plus EOB) were coded
+ * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ * the last real code. In this case we send two empty static blocks instead
+ * of one. (There are no problems if the previous block is stored or fixed.)
+ * To simplify the code, we assume the worst case of last real code encoded
+ * on one bit only.
+ */
+void ZLIB_INTERNAL _tr_align(s)
+ deflate_state *s;
+{
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+#ifdef DEBUG
+ s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
+#endif
+ bi_flush(s);
+ /* Of the 10 bits for the empty block, we have already sent
+ * (10 - bi_valid) bits. The lookahead for the last real code (before
+ * the EOB of the previous block) was thus at least one plus the length
+ * of the EOB plus what we have just sent of the empty static block.
+ */
+ if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+#ifdef DEBUG
+ s->compressed_len += 10L;
+#endif
+ bi_flush(s);
+ }
+ s->last_eob_len = 7;
+}
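+
+/* Worked example (illustrative): if the previous block's EOB used 3 bits
+ * (last_eob_len == 3) and bi_valid == 7 after the flush above, then
+ * 1 + 3 + 10 - 7 = 7 < 9, so a second empty static block is sent to
+ * guarantee the 9 bits of lookahead that inflate may need.
+ */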
+
+/* ===========================================================================
+ * Determine the best encoding for the current block: dynamic trees, static
+ * trees or store, and output the encoded block to the zip file.
+ */
+void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
+ deflate_state *s;
+ charf *buf; /* input block, or NULL if too old */
+ ulg stored_len; /* length of input block */
+ int last; /* one if this is the last block for a file */
+{
+ ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
+ int max_blindex = 0; /* index of last bit length code of non zero freq */
+
+ /* Build the Huffman trees unless a stored block is forced */
+ if (s->level > 0) {
+
+ /* Check if the file is binary or text */
+ if (s->strm->data_type == Z_UNKNOWN)
+ s->strm->data_type = detect_data_type(s);
+
+ /* Construct the literal and distance trees */
+ build_tree(s, (tree_desc *)(&(s->l_desc)));
+ Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+
+ build_tree(s, (tree_desc *)(&(s->d_desc)));
+ Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+ /* At this point, opt_len and static_len are the total bit lengths of
+ * the compressed block data, excluding the tree representations.
+ */
+
+ /* Build the bit length tree for the above two trees, and get the index
+ * in bl_order of the last bit length code to send.
+ */
+ max_blindex = build_bl_tree(s);
+
+ /* Determine the best encoding. Compute the block lengths in bytes. */
+ opt_lenb = (s->opt_len+3+7)>>3;
+ static_lenb = (s->static_len+3+7)>>3;
+
+ Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
+ opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
+ s->last_lit));
+
+ if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
+
+ } else {
+ Assert(buf != (char*)0, "lost buf");
+ opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
+ }
+
+#ifdef FORCE_STORED
+ if (buf != (char*)0) { /* force stored block */
+#else
+ if (stored_len+4 <= opt_lenb && buf != (char*)0) {
+ /* 4: two words for the lengths */
+#endif
+ /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ * Otherwise we can't have processed more than WSIZE input bytes since
+ * the last block flush, because compression would have been
+ * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ * transform a block into a stored block.
+ */
+ _tr_stored_block(s, buf, stored_len, last);
+
+#ifdef FORCE_STATIC
+ } else if (static_lenb >= 0) { /* force static trees */
+#else
+ } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) {
+#endif
+ send_bits(s, (STATIC_TREES<<1)+last, 3);
+ compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
+#ifdef DEBUG
+ s->compressed_len += 3 + s->static_len;
+#endif
+ } else {
+ send_bits(s, (DYN_TREES<<1)+last, 3);
+ send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
+ max_blindex+1);
+ compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
+#ifdef DEBUG
+ s->compressed_len += 3 + s->opt_len;
+#endif
+ }
+ Assert (s->compressed_len == s->bits_sent, "bad compressed size");
+ /* The above check is made mod 2^32, for files larger than 512 MB
+ * and uLong implemented on 32 bits.
+ */
+ init_block(s);
+
+ if (last) {
+ bi_windup(s);
+#ifdef DEBUG
+ s->compressed_len += 7; /* align on byte boundary */
+#endif
+ }
+ Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
+ s->compressed_len-7*last));
+}
+
+/* ===========================================================================
+ * Save the match info and tally the frequency counts. Return true if
+ * the current block must be flushed.
+ */
+int ZLIB_INTERNAL _tr_tally (s, dist, lc)
+ deflate_state *s;
+ unsigned dist; /* distance of matched string */
+ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
+{
+ s->d_buf[s->last_lit] = (ush)dist;
+ s->l_buf[s->last_lit++] = (uch)lc;
+ if (dist == 0) {
+ /* lc is the unmatched char */
+ s->dyn_ltree[lc].Freq++;
+ } else {
+ s->matches++;
+ /* Here, lc is the match length - MIN_MATCH */
+ dist--; /* dist = match distance - 1 */
+ Assert((ush)dist < (ush)MAX_DIST(s) &&
+ (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
+ (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
+
+ s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++;
+ s->dyn_dtree[d_code(dist)].Freq++;
+ }
+
+#ifdef TRUNCATE_BLOCK
+ /* Try to guess if it is profitable to stop the current block here */
+ if ((s->last_lit & 0x1fff) == 0 && s->level > 2) {
+ /* Compute an upper bound for the compressed length */
+ ulg out_length = (ulg)s->last_lit*8L;
+ ulg in_length = (ulg)((long)s->strstart - s->block_start);
+ int dcode;
+ for (dcode = 0; dcode < D_CODES; dcode++) {
+ out_length += (ulg)s->dyn_dtree[dcode].Freq *
+ (5L+extra_dbits[dcode]);
+ }
+ out_length >>= 3;
+ Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
+ s->last_lit, in_length, out_length,
+ 100L - out_length*100L/in_length));
+ if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
+ }
+#endif
+ return (s->last_lit == s->lit_bufsize-1);
+ /* We avoid equality with lit_bufsize because of wraparound at 64K
+ * on 16 bit machines and because stored blocks are restricted to
+ * 64K-1 bytes.
+ */
+}
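+
+/* Worked example (illustrative, assuming LITERALS == 256): a literal byte
+ * 'A' is tallied as _tr_tally(s, 0, 'A') and bumps dyn_ltree[65].Freq; a
+ * match of length 5 at distance 7 is tallied as _tr_tally(s, 7, 2), where
+ * _length_code[2] == 2 selects length symbol 2 + LITERALS + 1 = 259 and
+ * d_code(6) == 5 bumps dyn_dtree[5].Freq.
+ */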
+
+/* ===========================================================================
+ * Send the block data compressed using the given Huffman trees
+ */
+local void compress_block(s, ltree, dtree)
+ deflate_state *s;
+ ct_data *ltree; /* literal tree */
+ ct_data *dtree; /* distance tree */
+{
+ unsigned dist; /* distance of matched string */
+ int lc; /* match length or unmatched char (if dist == 0) */
+ unsigned lx = 0; /* running index in l_buf */
+ unsigned code; /* the code to send */
+ int extra; /* number of extra bits to send */
+
+ if (s->last_lit != 0) do {
+ dist = s->d_buf[lx];
+ lc = s->l_buf[lx++];
+ if (dist == 0) {
+ send_code(s, lc, ltree); /* send a literal byte */
+ Tracecv(isgraph(lc), (stderr," '%c' ", lc));
+ } else {
+ /* Here, lc is the match length - MIN_MATCH */
+ code = _length_code[lc];
+ send_code(s, code+LITERALS+1, ltree); /* send the length code */
+ extra = extra_lbits[code];
+ if (extra != 0) {
+ lc -= base_length[code];
+ send_bits(s, lc, extra); /* send the extra length bits */
+ }
+ dist--; /* dist is now the match distance - 1 */
+ code = d_code(dist);
+ Assert (code < D_CODES, "bad d_code");
+
+ send_code(s, code, dtree); /* send the distance code */
+ extra = extra_dbits[code];
+ if (extra != 0) {
+ dist -= base_dist[code];
+ send_bits(s, dist, extra); /* send the extra distance bits */
+ }
+ } /* literal or match pair ? */
+
+ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
+ Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,
+ "pendingBuf overflow");
+
+ } while (lx < s->last_lit);
+
+ send_code(s, END_BLOCK, ltree);
+ s->last_eob_len = ltree[END_BLOCK].Len;
+}
+
+/* ===========================================================================
+ * Check if the data type is TEXT or BINARY, using the following algorithm:
+ * - TEXT if the two conditions below are satisfied:
+ * a) There are no non-portable control characters belonging to the
+ * "black list" (0..6, 14..25, 28..31).
+ * b) There is at least one printable character belonging to the
+ * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).
+ * - BINARY otherwise.
+ * - The following partially-portable control characters form a
+ * "gray list" that is ignored in this detection algorithm:
+ * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).
+ * IN assertion: the fields Freq of dyn_ltree are set.
+ */
+local int detect_data_type(s)
+ deflate_state *s;
+{
+ /* black_mask is the bit mask of black-listed bytes
+ * set bits 0..6, 14..25, and 28..31
+ * 0xf3ffc07f = binary 11110011111111111100000001111111
+ */
+ unsigned long black_mask = 0xf3ffc07fUL;
+ int n;
+
+ /* Check for non-textual ("black-listed") bytes. */
+ for (n = 0; n <= 31; n++, black_mask >>= 1)
+ if ((black_mask & 1) && (s->dyn_ltree[n].Freq != 0))
+ return Z_BINARY;
+
+ /* Check for textual ("white-listed") bytes. */
+ if (s->dyn_ltree[9].Freq != 0 || s->dyn_ltree[10].Freq != 0
+ || s->dyn_ltree[13].Freq != 0)
+ return Z_TEXT;
+ for (n = 32; n < LITERALS; n++)
+ if (s->dyn_ltree[n].Freq != 0)
+ return Z_TEXT;
+
+ /* There are no "black-listed" or "white-listed" bytes:
+ * this stream either is empty or has tolerated ("gray-listed") bytes only.
+ */
+ return Z_BINARY;
+}
+
+/* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+local unsigned bi_reverse(code, len)
+ unsigned code; /* the value to invert */
+ int len; /* its bit length */
+{
+ register unsigned res = 0;
+ do {
+ res |= code & 1;
+ code >>= 1, res <<= 1;
+ } while (--len > 0);
+ return res >> 1;
+}
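+
+/* Worked example (illustrative): bi_reverse(6, 5) turns the 5-bit value
+ * 00110 into 01100 and returns 12. Deflate packs Huffman codes starting
+ * with their most significant bit, while send_bits writes LSB first, so
+ * all codes are stored pre-reversed.
+ */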
+
+/* ===========================================================================
+ * Flush the bit buffer, keeping at most 7 bits in it.
+ */
+local void bi_flush(s)
+ deflate_state *s;
+{
+ if (s->bi_valid == 16) {
+ put_short(s, s->bi_buf);
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ } else if (s->bi_valid >= 8) {
+ put_byte(s, (Byte)s->bi_buf);
+ s->bi_buf >>= 8;
+ s->bi_valid -= 8;
+ }
+}
+
+/* ===========================================================================
+ * Flush the bit buffer and align the output on a byte boundary
+ */
+local void bi_windup(s)
+ deflate_state *s;
+{
+ if (s->bi_valid > 8) {
+ put_short(s, s->bi_buf);
+ } else if (s->bi_valid > 0) {
+ put_byte(s, (Byte)s->bi_buf);
+ }
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+#ifdef DEBUG
+ s->bits_sent = (s->bits_sent+7) & ~7;
+#endif
+}
+
+/* ===========================================================================
+ * Copy a stored block, storing first the length and its
+ * one's complement if requested.
+ */
+local void copy_block(s, buf, len, header)
+ deflate_state *s;
+ charf *buf; /* the input data */
+ unsigned len; /* its length */
+ int header; /* true if block header must be written */
+{
+ bi_windup(s); /* align on byte boundary */
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+
+ if (header) {
+ put_short(s, (ush)len);
+ put_short(s, (ush)~len);
+#ifdef DEBUG
+ s->bits_sent += 2*16;
+#endif
+ }
+#ifdef DEBUG
+ s->bits_sent += (ulg)len<<3;
+#endif
+ while (len--) {
+ put_byte(s, *buf++);
+ }
+}
--- /dev/null
+/* header created automatically with -DGEN_TREES_H */
+
+local const ct_data static_ltree[L_CODES+2] = {
+{{ 12},{ 8}}, {{140},{ 8}}, {{ 76},{ 8}}, {{204},{ 8}}, {{ 44},{ 8}},
+{{172},{ 8}}, {{108},{ 8}}, {{236},{ 8}}, {{ 28},{ 8}}, {{156},{ 8}},
+{{ 92},{ 8}}, {{220},{ 8}}, {{ 60},{ 8}}, {{188},{ 8}}, {{124},{ 8}},
+{{252},{ 8}}, {{ 2},{ 8}}, {{130},{ 8}}, {{ 66},{ 8}}, {{194},{ 8}},
+{{ 34},{ 8}}, {{162},{ 8}}, {{ 98},{ 8}}, {{226},{ 8}}, {{ 18},{ 8}},
+{{146},{ 8}}, {{ 82},{ 8}}, {{210},{ 8}}, {{ 50},{ 8}}, {{178},{ 8}},
+{{114},{ 8}}, {{242},{ 8}}, {{ 10},{ 8}}, {{138},{ 8}}, {{ 74},{ 8}},
+{{202},{ 8}}, {{ 42},{ 8}}, {{170},{ 8}}, {{106},{ 8}}, {{234},{ 8}},
+{{ 26},{ 8}}, {{154},{ 8}}, {{ 90},{ 8}}, {{218},{ 8}}, {{ 58},{ 8}},
+{{186},{ 8}}, {{122},{ 8}}, {{250},{ 8}}, {{ 6},{ 8}}, {{134},{ 8}},
+{{ 70},{ 8}}, {{198},{ 8}}, {{ 38},{ 8}}, {{166},{ 8}}, {{102},{ 8}},
+{{230},{ 8}}, {{ 22},{ 8}}, {{150},{ 8}}, {{ 86},{ 8}}, {{214},{ 8}},
+{{ 54},{ 8}}, {{182},{ 8}}, {{118},{ 8}}, {{246},{ 8}}, {{ 14},{ 8}},
+{{142},{ 8}}, {{ 78},{ 8}}, {{206},{ 8}}, {{ 46},{ 8}}, {{174},{ 8}},
+{{110},{ 8}}, {{238},{ 8}}, {{ 30},{ 8}}, {{158},{ 8}}, {{ 94},{ 8}},
+{{222},{ 8}}, {{ 62},{ 8}}, {{190},{ 8}}, {{126},{ 8}}, {{254},{ 8}},
+{{ 1},{ 8}}, {{129},{ 8}}, {{ 65},{ 8}}, {{193},{ 8}}, {{ 33},{ 8}},
+{{161},{ 8}}, {{ 97},{ 8}}, {{225},{ 8}}, {{ 17},{ 8}}, {{145},{ 8}},
+{{ 81},{ 8}}, {{209},{ 8}}, {{ 49},{ 8}}, {{177},{ 8}}, {{113},{ 8}},
+{{241},{ 8}}, {{ 9},{ 8}}, {{137},{ 8}}, {{ 73},{ 8}}, {{201},{ 8}},
+{{ 41},{ 8}}, {{169},{ 8}}, {{105},{ 8}}, {{233},{ 8}}, {{ 25},{ 8}},
+{{153},{ 8}}, {{ 89},{ 8}}, {{217},{ 8}}, {{ 57},{ 8}}, {{185},{ 8}},
+{{121},{ 8}}, {{249},{ 8}}, {{ 5},{ 8}}, {{133},{ 8}}, {{ 69},{ 8}},
+{{197},{ 8}}, {{ 37},{ 8}}, {{165},{ 8}}, {{101},{ 8}}, {{229},{ 8}},
+{{ 21},{ 8}}, {{149},{ 8}}, {{ 85},{ 8}}, {{213},{ 8}}, {{ 53},{ 8}},
+{{181},{ 8}}, {{117},{ 8}}, {{245},{ 8}}, {{ 13},{ 8}}, {{141},{ 8}},
+{{ 77},{ 8}}, {{205},{ 8}}, {{ 45},{ 8}}, {{173},{ 8}}, {{109},{ 8}},
+{{237},{ 8}}, {{ 29},{ 8}}, {{157},{ 8}}, {{ 93},{ 8}}, {{221},{ 8}},
+{{ 61},{ 8}}, {{189},{ 8}}, {{125},{ 8}}, {{253},{ 8}}, {{ 19},{ 9}},
+{{275},{ 9}}, {{147},{ 9}}, {{403},{ 9}}, {{ 83},{ 9}}, {{339},{ 9}},
+{{211},{ 9}}, {{467},{ 9}}, {{ 51},{ 9}}, {{307},{ 9}}, {{179},{ 9}},
+{{435},{ 9}}, {{115},{ 9}}, {{371},{ 9}}, {{243},{ 9}}, {{499},{ 9}},
+{{ 11},{ 9}}, {{267},{ 9}}, {{139},{ 9}}, {{395},{ 9}}, {{ 75},{ 9}},
+{{331},{ 9}}, {{203},{ 9}}, {{459},{ 9}}, {{ 43},{ 9}}, {{299},{ 9}},
+{{171},{ 9}}, {{427},{ 9}}, {{107},{ 9}}, {{363},{ 9}}, {{235},{ 9}},
+{{491},{ 9}}, {{ 27},{ 9}}, {{283},{ 9}}, {{155},{ 9}}, {{411},{ 9}},
+{{ 91},{ 9}}, {{347},{ 9}}, {{219},{ 9}}, {{475},{ 9}}, {{ 59},{ 9}},
+{{315},{ 9}}, {{187},{ 9}}, {{443},{ 9}}, {{123},{ 9}}, {{379},{ 9}},
+{{251},{ 9}}, {{507},{ 9}}, {{ 7},{ 9}}, {{263},{ 9}}, {{135},{ 9}},
+{{391},{ 9}}, {{ 71},{ 9}}, {{327},{ 9}}, {{199},{ 9}}, {{455},{ 9}},
+{{ 39},{ 9}}, {{295},{ 9}}, {{167},{ 9}}, {{423},{ 9}}, {{103},{ 9}},
+{{359},{ 9}}, {{231},{ 9}}, {{487},{ 9}}, {{ 23},{ 9}}, {{279},{ 9}},
+{{151},{ 9}}, {{407},{ 9}}, {{ 87},{ 9}}, {{343},{ 9}}, {{215},{ 9}},
+{{471},{ 9}}, {{ 55},{ 9}}, {{311},{ 9}}, {{183},{ 9}}, {{439},{ 9}},
+{{119},{ 9}}, {{375},{ 9}}, {{247},{ 9}}, {{503},{ 9}}, {{ 15},{ 9}},
+{{271},{ 9}}, {{143},{ 9}}, {{399},{ 9}}, {{ 79},{ 9}}, {{335},{ 9}},
+{{207},{ 9}}, {{463},{ 9}}, {{ 47},{ 9}}, {{303},{ 9}}, {{175},{ 9}},
+{{431},{ 9}}, {{111},{ 9}}, {{367},{ 9}}, {{239},{ 9}}, {{495},{ 9}},
+{{ 31},{ 9}}, {{287},{ 9}}, {{159},{ 9}}, {{415},{ 9}}, {{ 95},{ 9}},
+{{351},{ 9}}, {{223},{ 9}}, {{479},{ 9}}, {{ 63},{ 9}}, {{319},{ 9}},
+{{191},{ 9}}, {{447},{ 9}}, {{127},{ 9}}, {{383},{ 9}}, {{255},{ 9}},
+{{511},{ 9}}, {{ 0},{ 7}}, {{ 64},{ 7}}, {{ 32},{ 7}}, {{ 96},{ 7}},
+{{ 16},{ 7}}, {{ 80},{ 7}}, {{ 48},{ 7}}, {{112},{ 7}}, {{ 8},{ 7}},
+{{ 72},{ 7}}, {{ 40},{ 7}}, {{104},{ 7}}, {{ 24},{ 7}}, {{ 88},{ 7}},
+{{ 56},{ 7}}, {{120},{ 7}}, {{ 4},{ 7}}, {{ 68},{ 7}}, {{ 36},{ 7}},
+{{100},{ 7}}, {{ 20},{ 7}}, {{ 84},{ 7}}, {{ 52},{ 7}}, {{116},{ 7}},
+{{ 3},{ 8}}, {{131},{ 8}}, {{ 67},{ 8}}, {{195},{ 8}}, {{ 35},{ 8}},
+{{163},{ 8}}, {{ 99},{ 8}}, {{227},{ 8}}
+};
+
+local const ct_data static_dtree[D_CODES] = {
+{{ 0},{ 5}}, {{16},{ 5}}, {{ 8},{ 5}}, {{24},{ 5}}, {{ 4},{ 5}},
+{{20},{ 5}}, {{12},{ 5}}, {{28},{ 5}}, {{ 2},{ 5}}, {{18},{ 5}},
+{{10},{ 5}}, {{26},{ 5}}, {{ 6},{ 5}}, {{22},{ 5}}, {{14},{ 5}},
+{{30},{ 5}}, {{ 1},{ 5}}, {{17},{ 5}}, {{ 9},{ 5}}, {{25},{ 5}},
+{{ 5},{ 5}}, {{21},{ 5}}, {{13},{ 5}}, {{29},{ 5}}, {{ 3},{ 5}},
+{{19},{ 5}}, {{11},{ 5}}, {{27},{ 5}}, {{ 7},{ 5}}, {{23},{ 5}}
+};
+
+const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {
+ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
+ 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10,
+10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13,
+13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15,
+15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17,
+18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22,
+23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
+27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
+};
+
+const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
+13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
+26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28
+};
+
+local const int base_length[LENGTH_CODES] = {
+0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+64, 80, 96, 112, 128, 160, 192, 224, 0
+};
+
+local const int base_dist[D_CODES] = {
+ 0, 1, 2, 3, 4, 6, 8, 12, 16, 24,
+ 32, 48, 64, 96, 128, 192, 256, 384, 512, 768,
+ 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576
+};
+
--- /dev/null
+/* uncompr.c -- decompress a memory buffer
+ * Copyright (C) 1995-2003, 2010 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#define ZLIB_INTERNAL
+#include "zlib.h"
+
+/* ===========================================================================
+ Decompresses the source buffer into the destination buffer. sourceLen is
+ the byte length of the source buffer. Upon entry, destLen is the total
+ size of the destination buffer, which must be large enough to hold the
+ entire uncompressed data. (The size of the uncompressed data must have
+ been saved previously by the compressor and transmitted to the decompressor
+ by some mechanism outside the scope of this compression library.)
+ Upon exit, destLen is the actual size of the uncompressed data.
+
+ uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if there was not enough room in the output
+ buffer, or Z_DATA_ERROR if the input data was corrupted.
+*/
+int ZEXPORT uncompress (dest, destLen, source, sourceLen)
+ Bytef *dest;
+ uLongf *destLen;
+ const Bytef *source;
+ uLong sourceLen;
+{
+ z_stream stream;
+ int err;
+
+ stream.next_in = (Bytef*)source;
+ stream.avail_in = (uInt)sourceLen;
+ /* Check for source > 64K on 16-bit machine: */
+ if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR;
+
+ stream.next_out = dest;
+ stream.avail_out = (uInt)*destLen;
+ if ((uLong)stream.avail_out != *destLen) return Z_BUF_ERROR;
+
+ stream.zalloc = (alloc_func)0;
+ stream.zfree = (free_func)0;
+
+ err = inflateInit(&stream);
+ if (err != Z_OK) return err;
+
+ err = inflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END) {
+ inflateEnd(&stream);
+ if (err == Z_NEED_DICT || (err == Z_BUF_ERROR && stream.avail_in == 0))
+ return Z_DATA_ERROR;
+ return err;
+ }
+ *destLen = stream.total_out;
+
+ err = inflateEnd(&stream);
+ return err;
+}
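+
+/* Minimal usage sketch (illustrative; the buffer names and sizes here are
+ * assumptions, and error handling is trimmed):
+ *
+ *     Bytef out[1024];
+ *     uLongf outLen = sizeof(out);    (must hold all uncompressed data)
+ *     int rc = uncompress(out, &outLen, in, inLen);
+ *     if (rc == Z_OK) { ...the first outLen bytes of out are valid... }
+ */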
--- /dev/null
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995-2010 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#ifndef ZCONF_H
+#define ZCONF_H
+
+/*
+ * If you *really* need a unique prefix for all types and library functions,
+ * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
+ * Even better than compiling with -DZ_PREFIX would be to use configure to set
+ * this permanently in zconf.h using "./configure --zprefix".
+ */
+#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */
+
+/* all linked symbols */
+# define _dist_code z__dist_code
+# define _length_code z__length_code
+# define _tr_align z__tr_align
+# define _tr_flush_block z__tr_flush_block
+# define _tr_init z__tr_init
+# define _tr_stored_block z__tr_stored_block
+# define _tr_tally z__tr_tally
+# define adler32 z_adler32
+# define adler32_combine z_adler32_combine
+# define adler32_combine64 z_adler32_combine64
+# define compress z_compress
+# define compress2 z_compress2
+# define compressBound z_compressBound
+# define crc32 z_crc32
+# define crc32_combine z_crc32_combine
+# define crc32_combine64 z_crc32_combine64
+# define deflate z_deflate
+# define deflateBound z_deflateBound
+# define deflateCopy z_deflateCopy
+# define deflateEnd z_deflateEnd
+# define deflateInit2_ z_deflateInit2_
+# define deflateInit_ z_deflateInit_
+# define deflateParams z_deflateParams
+# define deflatePrime z_deflatePrime
+# define deflateReset z_deflateReset
+# define deflateSetDictionary z_deflateSetDictionary
+# define deflateSetHeader z_deflateSetHeader
+# define deflateTune z_deflateTune
+# define deflate_copyright z_deflate_copyright
+# define get_crc_table z_get_crc_table
+# define gz_error z_gz_error
+# define gz_intmax z_gz_intmax
+# define gz_strwinerror z_gz_strwinerror
+# define gzbuffer z_gzbuffer
+# define gzclearerr z_gzclearerr
+# define gzclose z_gzclose
+# define gzclose_r z_gzclose_r
+# define gzclose_w z_gzclose_w
+# define gzdirect z_gzdirect
+# define gzdopen z_gzdopen
+# define gzeof z_gzeof
+# define gzerror z_gzerror
+# define gzflush z_gzflush
+# define gzgetc z_gzgetc
+# define gzgets z_gzgets
+# define gzoffset z_gzoffset
+# define gzoffset64 z_gzoffset64
+# define gzopen z_gzopen
+# define gzopen64 z_gzopen64
+# define gzprintf z_gzprintf
+# define gzputc z_gzputc
+# define gzputs z_gzputs
+# define gzread z_gzread
+# define gzrewind z_gzrewind
+# define gzseek z_gzseek
+# define gzseek64 z_gzseek64
+# define gzsetparams z_gzsetparams
+# define gztell z_gztell
+# define gztell64 z_gztell64
+# define gzungetc z_gzungetc
+# define gzwrite z_gzwrite
+# define inflate z_inflate
+# define inflateBack z_inflateBack
+# define inflateBackEnd z_inflateBackEnd
+# define inflateBackInit_ z_inflateBackInit_
+# define inflateCopy z_inflateCopy
+# define inflateEnd z_inflateEnd
+# define inflateGetHeader z_inflateGetHeader
+# define inflateInit2_ z_inflateInit2_
+# define inflateInit_ z_inflateInit_
+# define inflateMark z_inflateMark
+# define inflatePrime z_inflatePrime
+# define inflateReset z_inflateReset
+# define inflateReset2 z_inflateReset2
+# define inflateSetDictionary z_inflateSetDictionary
+# define inflateSync z_inflateSync
+# define inflateSyncPoint z_inflateSyncPoint
+# define inflateUndermine z_inflateUndermine
+# define inflate_copyright z_inflate_copyright
+# define inflate_fast z_inflate_fast
+# define inflate_table z_inflate_table
+# define uncompress z_uncompress
+# define zError z_zError
+# define zcalloc z_zcalloc
+# define zcfree z_zcfree
+# define zlibCompileFlags z_zlibCompileFlags
+# define zlibVersion z_zlibVersion
+
+/* all zlib typedefs in zlib.h and zconf.h */
+# define Byte z_Byte
+# define Bytef z_Bytef
+# define alloc_func z_alloc_func
+# define charf z_charf
+# define free_func z_free_func
+# define gzFile z_gzFile
+# define gz_header z_gz_header
+# define gz_headerp z_gz_headerp
+# define in_func z_in_func
+# define intf z_intf
+# define out_func z_out_func
+# define uInt z_uInt
+# define uIntf z_uIntf
+# define uLong z_uLong
+# define uLongf z_uLongf
+# define voidp z_voidp
+# define voidpc z_voidpc
+# define voidpf z_voidpf
+
+/* all zlib structs in zlib.h and zconf.h */
+# define gz_header_s z_gz_header_s
+# define internal_state z_internal_state
+
+#endif
+
+#if defined(__MSDOS__) && !defined(MSDOS)
+# define MSDOS
+#endif
+#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2)
+# define OS2
+#endif
+#if defined(_WINDOWS) && !defined(WINDOWS)
+# define WINDOWS
+#endif
+#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__)
+# ifndef WIN32
+# define WIN32
+# endif
+#endif
+#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32)
+# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__)
+# ifndef SYS16BIT
+# define SYS16BIT
+# endif
+# endif
+#endif
+
+/*
+ * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
+ * than 64k bytes at a time (needed on systems with 16-bit int).
+ */
+#ifdef SYS16BIT
+# define MAXSEG_64K
+#endif
+#ifdef MSDOS
+# define UNALIGNED_OK
+#endif
+
+#ifdef __STDC_VERSION__
+# ifndef STDC
+# define STDC
+# endif
+# if __STDC_VERSION__ >= 199901L
+# ifndef STDC99
+# define STDC99
+# endif
+# endif
+#endif
+#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__))
+# define STDC
+#endif
+
+#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */
+# define STDC
+#endif
+
+#ifndef STDC
+# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
+# define const /* note: need a more gentle solution here */
+# endif
+#endif
+
+/* Some Mac compilers merge all .h files incorrectly: */
+#if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__)
+# define NO_DUMMY_DECL
+#endif
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# ifdef MAXSEG_64K
+# define MAX_MEM_LEVEL 8
+# else
+# define MAX_MEM_LEVEL 9
+# endif
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2.
+ * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files
+ * created by gzip. (Files created by minigzip can still be extracted by
+ * gzip.)
+ */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ (1 << (windowBits+2)) + (1 << (memLevel+9))
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus a few kilobytes
+ for small objects.
+*/
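+
+/* Worked example (illustrative): with the defaults windowBits = 15 and
+ * memLevel = 8, deflate needs (1 << 17) + (1 << 17) = 128K + 128K = 256K;
+ * compiling with -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7 halves both terms,
+ * giving the 128K total mentioned above.
+ */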
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+/* The following definitions for FAR are needed only for MSDOS mixed
+ * model programming (small or medium model with some far allocations).
+ * This was tested only with MSC; for other MSDOS compilers you may have
+ * to define NO_MEMCPY in zutil.h. If you don't need the mixed model,
+ * just define FAR to be empty.
+ */
+#ifdef SYS16BIT
+# if defined(M_I86SM) || defined(M_I86MM)
+ /* MSC small or medium model */
+# define SMALL_MEDIUM
+# ifdef _MSC_VER
+# define FAR _far
+# else
+# define FAR far
+# endif
+# endif
+# if (defined(__SMALL__) || defined(__MEDIUM__))
+ /* Turbo C small or medium model */
+# define SMALL_MEDIUM
+# ifdef __BORLANDC__
+# define FAR _far
+# else
+# define FAR far
+# endif
+# endif
+#endif
+
+#if defined(WINDOWS) || defined(WIN32)
+ /* If building or using zlib as a DLL, define ZLIB_DLL.
+ * This is not mandatory, but it offers a little performance increase.
+ */
+# ifdef ZLIB_DLL
+# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500))
+# ifdef ZLIB_INTERNAL
+# define ZEXTERN extern __declspec(dllexport)
+# else
+# define ZEXTERN extern __declspec(dllimport)
+# endif
+# endif
+# endif /* ZLIB_DLL */
+ /* If building or using zlib with the WINAPI/WINAPIV calling convention,
+ * define ZLIB_WINAPI.
+ * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI.
+ */
+# ifdef ZLIB_WINAPI
+# ifdef FAR
+# undef FAR
+# endif
+# include <windows.h>
+ /* No need for _export, use ZLIB.DEF instead. */
+ /* For complete Windows compatibility, use WINAPI, not __stdcall. */
+# define ZEXPORT WINAPI
+# ifdef WIN32
+# define ZEXPORTVA WINAPIV
+# else
+# define ZEXPORTVA FAR CDECL
+# endif
+# endif
+#endif
+
+#if defined (__BEOS__)
+# ifdef ZLIB_DLL
+# ifdef ZLIB_INTERNAL
+# define ZEXPORT __declspec(dllexport)
+# define ZEXPORTVA __declspec(dllexport)
+# else
+# define ZEXPORT __declspec(dllimport)
+# define ZEXPORTVA __declspec(dllimport)
+# endif
+# endif
+#endif
+
+#ifndef ZEXTERN
+# define ZEXTERN extern
+#endif
+#ifndef ZEXPORT
+# define ZEXPORT
+#endif
+#ifndef ZEXPORTVA
+# define ZEXPORTVA
+#endif
+
+#ifndef FAR
+# define FAR
+#endif
+
+#if !defined(__MACTYPES__)
+typedef unsigned char Byte; /* 8 bits */
+#endif
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+#ifdef SMALL_MEDIUM
+ /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */
+# define Bytef Byte FAR
+#else
+ typedef Byte FAR Bytef;
+#endif
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+ typedef void const *voidpc;
+ typedef void FAR *voidpf;
+ typedef void *voidp;
+#else
+ typedef Byte const *voidpc;
+ typedef Byte FAR *voidpf;
+ typedef Byte *voidp;
+#endif
+
+#if !defined(MSDOS) && !defined(WINDOWS) && !defined(WIN32)
+# define Z_HAVE_UNISTD_H
+#endif
+
+#ifdef STDC
+# include <sys/types.h> /* for off_t */
+#endif
+
+/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and
+ * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even
+ * though the former does not conform to the LFS document), but considering
+ * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as
+ * equivalently requesting no 64-bit operations
+ */
+#if -_LARGEFILE64_SOURCE - -1 == 1
+# undef _LARGEFILE64_SOURCE
+#endif
+
+#if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE)
+# include <unistd.h> /* for SEEK_* and off_t */
+# ifdef VMS
+# include <unixio.h> /* for off_t */
+# endif
+# ifndef z_off_t
+# define z_off_t off_t
+# endif
+#endif
+
+#ifndef SEEK_SET
+# define SEEK_SET 0 /* Seek from beginning of file. */
+# define SEEK_CUR 1 /* Seek from current position. */
+# define SEEK_END 2 /* Set file pointer to EOF plus "offset". */
+#endif
+
+#ifndef z_off_t
+# define z_off_t long
+#endif
+
+#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0
+# define z_off64_t off64_t
+#else
+# define z_off64_t z_off_t
+#endif
+
+#if defined(__OS400__)
+# define NO_vsnprintf
+#endif
+
+#if defined(__MVS__)
+# define NO_vsnprintf
+#endif
+
+/* MVS linker does not support external names larger than 8 bytes */
+#if defined(__MVS__)
+ #pragma map(deflateInit_,"DEIN")
+ #pragma map(deflateInit2_,"DEIN2")
+ #pragma map(deflateEnd,"DEEND")
+ #pragma map(deflateBound,"DEBND")
+ #pragma map(inflateInit_,"ININ")
+ #pragma map(inflateInit2_,"ININ2")
+ #pragma map(inflateEnd,"INEND")
+ #pragma map(inflateSync,"INSY")
+ #pragma map(inflateSetDictionary,"INSEDI")
+ #pragma map(compressBound,"CMBND")
+ #pragma map(inflate_table,"INTABL")
+ #pragma map(inflate_fast,"INFA")
+ #pragma map(inflate_copyright,"INCOPY")
+#endif
+
+#endif /* ZCONF_H */
--- /dev/null
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+ version 1.2.5, April 19th, 2010
+
+ Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
+
+
+ The data format used by the zlib library is described by RFCs (Request for
+ Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt
+ (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
+*/
+
+#ifndef ZLIB_H
+#define ZLIB_H
+
+#include "zconf.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ZLIB_VERSION "1.2.5"
+#define ZLIB_VERNUM 0x1250
+#define ZLIB_VER_MAJOR 1
+#define ZLIB_VER_MINOR 2
+#define ZLIB_VER_REVISION 5
+#define ZLIB_VER_SUBREVISION 0
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed data.
+ This version of the library supports only one compression method (deflation)
+ but other algorithms will be added later and will have the same stream
+ interface.
+
+ Compression can be done in a single step if the buffers are large enough,
+ or can be done by repeated calls of the compression function. In the latter
+ case, the application must provide more input and/or consume the output
+ (providing more output space) before each call.
+
+ The compressed data format used by default by the in-memory functions is
+ the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped
+ around a deflate stream, which is itself documented in RFC 1951.
+
+ The library also supports reading and writing files in gzip (.gz) format
+ with an interface similar to that of stdio using the functions that start
+ with "gz". The gzip format is different from the zlib format. gzip is a
+ gzip wrapper, documented in RFC 1952, wrapped around a deflate stream.
+
+ This library can optionally read and write gzip streams in memory as well.
+
+ The zlib format was designed to be compact and fast for use in memory
+ and on communications channels. The gzip format was designed for single-
+ file compression on file systems, has a larger header than zlib to maintain
+ directory information, and uses a different, slower check method than zlib.
+
+ The library does not install any signal handler. The decoder checks
+ the consistency of the compressed data, so the library should never crash
+ even in case of corrupted input.
+*/
+
+typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
+typedef void (*free_func) OF((voidpf opaque, voidpf address));
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ Bytef *next_in; /* next input byte */
+ uInt avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total nb of input bytes read so far */
+
+ Bytef *next_out; /* next output byte should be put there */
+ uInt avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total nb of bytes output so far */
+
+ char *msg; /* last error message, NULL if no error */
+ struct internal_state FAR *state; /* not visible by applications */
+
+ alloc_func zalloc; /* used to allocate the internal state */
+ free_func zfree; /* used to free the internal state */
+ voidpf opaque; /* private data object passed to zalloc and zfree */
+
+ int data_type; /* best guess about the data type: binary or text */
+ uLong adler; /* adler32 value of the uncompressed data */
+ uLong reserved; /* reserved for future use */
+} z_stream;
+
+typedef z_stream FAR *z_streamp;
+
+/*
+ gzip header information passed to and from zlib routines. See RFC 1952
+ for more details on the meanings of these fields.
+*/
+typedef struct gz_header_s {
+ int text; /* true if compressed data believed to be text */
+ uLong time; /* modification time */
+ int xflags; /* extra flags (not used when writing a gzip file) */
+ int os; /* operating system */
+ Bytef *extra; /* pointer to extra field or Z_NULL if none */
+ uInt extra_len; /* extra field length (valid if extra != Z_NULL) */
+ uInt extra_max; /* space at extra (only when reading header) */
+ Bytef *name; /* pointer to zero-terminated file name or Z_NULL */
+ uInt name_max; /* space at name (only when reading header) */
+ Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */
+ uInt comm_max; /* space at comment (only when reading header) */
+ int hcrc; /* true if there was or will be a header crc */
+ int done; /* true when done reading gzip header (not used
+ when writing a gzip file) */
+} gz_header;
+
+typedef gz_header FAR *gz_headerp;
+
+/*
+ The application must update next_in and avail_in when avail_in has dropped
+ to zero. It must update next_out and avail_out when avail_out has dropped
+ to zero. The application must initialize zalloc, zfree and opaque before
+ calling the init function. All other fields are set by the compression
+ library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return Z_NULL if there is not enough memory for the object.
+ If zlib is used in a multi-threaded application, zalloc and zfree must be
+ thread safe.
+
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this if
+ the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers
+ returned by zalloc for objects of exactly 65536 bytes *must* have their
+ offset normalized to zero. The default allocation function provided by this
+ library ensures this (see zutil.c). To reduce memory requirements and avoid
+ any allocation of 64K objects, at the expense of compression ratio, compile
+ the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or progress
+ reports. After compression, total_in holds the total size of the
+ uncompressed data and may be saved for use in the decompressor (particularly
+ if the decompressor wants to decompress everything in a single step).
+*/
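+
+/*
+   A minimal sketch of the required initialization (illustrative only; the
+   variable name strm is arbitrary), selecting the default allocators by
+   setting zalloc and zfree to Z_NULL:
+
+     z_stream strm;
+     strm.zalloc = Z_NULL;      // use the default allocation function
+     strm.zfree  = Z_NULL;      // use the default free function
+     strm.opaque = Z_NULL;      // no private data for zalloc/zfree
+     strm.next_in  = Z_NULL;    // no input provided yet
+     strm.avail_in = 0;
+*/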
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1
+#define Z_SYNC_FLUSH 2
+#define Z_FULL_FLUSH 3
+#define Z_FINISH 4
+#define Z_BLOCK 5
+#define Z_TREES 6
+/* Allowed flush values; see deflate() and inflate() below for details */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_NEED_DICT 2
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+#define Z_VERSION_ERROR (-6)
+/* Return codes for the compression/decompression functions. Negative values
+ * are errors, positive values are used for special but normal events.
+ */
+
+#define Z_NO_COMPRESSION 0
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_RLE 3
+#define Z_FIXED 4
+#define Z_DEFAULT_STRATEGY 0
+/* compression strategy; see deflateInit2() below for details */
+
+#define Z_BINARY 0
+#define Z_TEXT 1
+#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */
+#define Z_UNKNOWN 2
+/* Possible values of the data_type field (though see inflate()) */
+
+#define Z_DEFLATED 8
+/* The deflate compression method (the only one supported in this version) */
+
+#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
+
+#define zlib_version zlibVersion()
+/* for compatibility with versions < 1.0.2 */
+
+
+ /* basic functions */
+
+ZEXTERN const char * ZEXPORT zlibVersion OF((void));
+/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
+ If the first character differs, the library code actually used is not
+ compatible with the zlib.h header file used by the application. This check
+ is automatically made by deflateInit and inflateInit.
+ */
+
+/*
+ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level));
+
+ Initializes the internal stream state for compression. The fields
+ zalloc, zfree and opaque must be initialized before by the caller. If
+ zalloc and zfree are set to Z_NULL, deflateInit updates them to use default
+ allocation functions.
+
+ The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
+ 1 gives best speed, 9 gives best compression, 0 gives no compression at all
+ (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION
+ requests a default compromise between speed and compression (currently
+ equivalent to level 6).
+
+ deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if level is not a valid compression level, or
+ Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
+ with the version assumed by the caller (ZLIB_VERSION). msg is set to null
+ if there is no error message. deflateInit does not perform any compression:
+ this will be done by deflate().
+*/
+
+
+ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush));
+/*
+ deflate compresses as much data as possible, and stops when the input
+ buffer becomes empty or the output buffer becomes full. It may introduce
+ some output latency (reading input without producing any output) except when
+ forced to flush.
+
+ The detailed semantics are as follows. deflate performs one or both of the
+ following actions:
+
+ - Compress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in and avail_in are updated and
+ processing will resume at this point for the next call of deflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. This action is forced if the parameter flush is non zero.
+ Forcing flush frequently degrades the compression ratio, so this parameter
+ should be set only when necessary (in interactive applications). Some
+ output may be provided even if flush is not set.
+
+ Before the call of deflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming more
+ output, and updating avail_in or avail_out accordingly; avail_out should
+ never be zero before the call. The application can consume the compressed
+ output when it wants, for example when the output buffer is full (avail_out
+ == 0), or after each call of deflate(). If deflate returns Z_OK with
+ zero avail_out, it must be called again after making room in the output
+ buffer because there might be more output pending.
+
+ Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to
+ decide how much data to accumulate before producing output, in order to
+ maximize compression.
+
+ If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
+ flushed to the output buffer and the output is aligned on a byte boundary, so
+ that the decompressor can get all input data available so far. (In
+ particular avail_in is zero after the call if enough output space has been
+ provided before the call.) Flushing may degrade compression for some
+ compression algorithms and so it should be used only when necessary. This
+ completes the current deflate block and follows it with an empty stored block
+ that is three bits plus filler bits to the next byte, followed by four bytes
+ (00 00 ff ff).
+
+ If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the
+ output buffer, but the output is not aligned to a byte boundary. All of the
+ input data so far will be available to the decompressor, as for Z_SYNC_FLUSH.
+ This completes the current deflate block and follows it with an empty fixed
+ codes block that is 10 bits long. This assures that enough bytes are output
+ in order for the decompressor to finish the block before the empty fixed code
+ block.
+
+ If flush is set to Z_BLOCK, a deflate block is completed and emitted, as
+ for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to
+ seven bits of the current block are held to be written as the next byte after
+ the next deflate block is completed. In this case, the decompressor may not
+ be provided enough bits at this point in order to complete decompression of
+ the data provided so far to the compressor. It may need to wait for the next
+ block to be emitted. This is for advanced applications that need to control
+ the emission of deflate blocks.
+
+ If flush is set to Z_FULL_FLUSH, all output is flushed as with
+ Z_SYNC_FLUSH, and the compression state is reset so that decompression can
+ restart from this point if previous compressed data has been damaged or if
+ random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
+ compression.
+
+ If deflate returns with avail_out == 0, this function must be called again
+ with the same value of the flush parameter and more output space (updated
+ avail_out), until the flush is complete (deflate returns with non-zero
+ avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that
+ avail_out is greater than six to avoid repeated flush markers due to
+ avail_out == 0 on return.
+
+ If the parameter flush is set to Z_FINISH, pending input is processed,
+ pending output is flushed and deflate returns with Z_STREAM_END if there was
+ enough output space; if deflate returns with Z_OK, this function must be
+ called again with Z_FINISH and more output space (updated avail_out) but no
+ more input data, until it returns with Z_STREAM_END or an error. After
+ deflate has returned Z_STREAM_END, the only possible operations on the stream
+ are deflateReset or deflateEnd.
+
+ Z_FINISH can be used immediately after deflateInit if all the compression
+ is to be done in a single step. In this case, avail_out must be at least the
+ value returned by deflateBound (see below). If deflate does not return
+ Z_STREAM_END, then it must be called again as described above.
+
+ deflate() sets strm->adler to the adler32 checksum of all input read
+ so far (that is, total_in bytes).
+
+ deflate() may update strm->data_type if it can make a good guess about
+ the input data type (Z_BINARY or Z_TEXT). If in doubt, the data is considered
+ binary. This field is only for information purposes and does not affect the
+ compression algorithm in any manner.
+
+ deflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if all input has been
+ consumed and all output has been produced (only when flush is set to
+ Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
+ if next_in or next_out was Z_NULL), Z_BUF_ERROR if no progress is possible
+ (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not
+ fatal, and deflate() can be called again with more input and more output
+ space to continue compressing.
+*/
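+
+/*
+   A minimal sketch of single-step compression (illustrative only; src, dst
+   and their lengths are placeholders, and dst is assumed to be at least
+   deflateBound() bytes). Z_FINISH immediately after deflateInit is the
+   single-step pattern described above:
+
+     int ret;
+     z_stream strm = {0};                   // zalloc/zfree/opaque = Z_NULL
+     ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
+     if (ret != Z_OK) return ret;
+     strm.next_in   = src;
+     strm.avail_in  = (uInt)src_len;
+     strm.next_out  = dst;
+     strm.avail_out = (uInt)dst_len;        // >= deflateBound(&strm, src_len)
+     ret = deflate(&strm, Z_FINISH);        // expect Z_STREAM_END
+     deflateEnd(&strm);
+     return ret == Z_STREAM_END ? Z_OK : Z_BUF_ERROR;
+*/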
+
+
+ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any pending
+ output.
+
+ deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
+ stream state was inconsistent, Z_DATA_ERROR if the stream was freed
+ prematurely (some input or output was discarded). In the error case, msg
+ may be set but then points to a static string (which must not be
+ deallocated).
+*/
+
+
+/*
+ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm));
+
+ Initializes the internal stream state for decompression. The fields
+ next_in, avail_in, zalloc, zfree and opaque must be initialized before by
+ the caller. If next_in is not Z_NULL and avail_in is large enough (the
+ exact value depends on the compression method), inflateInit determines the
+ compression method from the zlib header and allocates all data structures
+ accordingly; otherwise the allocation will be deferred to the first call of
+ inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to
+ use default allocation functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
+ version assumed by the caller, or Z_STREAM_ERROR if the parameters are
+ invalid, such as a null pointer to the structure. msg is set to null if
+ there is no error message. inflateInit does not perform any decompression
+ apart from possibly reading the zlib header if present: actual decompression
+ will be done by inflate(). (So next_in and avail_in may be modified, but
+ next_out and avail_out are unused and unchanged.) The current implementation
+ of inflateInit() does not process any header information -- that is deferred
+ until inflate() is called.
+*/
+
+
+ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush));
+/*
+ inflate decompresses as much data as possible, and stops when the input
+ buffer becomes empty or the output buffer becomes full. It may introduce
+ some output latency (reading input without producing any output) except when
+ forced to flush.
+
+ The detailed semantics are as follows. inflate performs one or both of the
+ following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in is updated and processing will
+ resume at this point for the next call of inflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() provides as much output as possible, until there is
+ no more input data or no more space in the output buffer (see below about
+ the flush parameter).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming more
+ output, and updating the next_* and avail_* values accordingly. The
+ application can consume the uncompressed output when it wants, for example
+ when the output buffer is full (avail_out == 0), or after each call of
+ inflate(). If inflate returns Z_OK with zero avail_out, it must be
+ called again after making room in the output buffer because there might be
+ more output pending.
+
+ The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH,
+ Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much
+ output as possible to the output buffer. Z_BLOCK requests that inflate()
+ stop if and when it gets to the next deflate block boundary. When decoding
+ the zlib or gzip format, this will cause inflate() to return immediately
+ after the header and before the first block. When doing a raw inflate,
+ inflate() will go ahead and process the first block, and will return when it
+ gets to the end of that block, or when it runs out of data.
+
+ The Z_BLOCK option assists in appending to or combining deflate streams.
+ Also to assist in this, on return inflate() will set strm->data_type to the
+ number of unused bits in the last byte taken from strm->next_in, plus 64 if
+ inflate() is currently decoding the last block in the deflate stream, plus
+ 128 if inflate() returned immediately after decoding an end-of-block code or
+ decoding the complete header up to just before the first byte of the deflate
+ stream. The end-of-block will not be indicated until all of the uncompressed
+ data from that block has been written to strm->next_out. The number of
+ unused bits may in general be greater than seven, except when bit 7 of
+ data_type is set, in which case the number of unused bits will be less than
+ eight. data_type is set as noted here every time inflate() returns for all
+ flush options, and so can be used to determine the amount of currently
+ consumed input in bits.
+
+ The Z_TREES option behaves as Z_BLOCK does, but it also returns when the
+ end of each deflate block header is reached, before any actual data in that
+ block is decoded. This allows the caller to determine the length of the
+ deflate block header for later use in random access within a deflate block.
+ 256 is added to the value of strm->data_type when inflate() returns
+ immediately after reaching the end of the deflate block header.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step (a
+ single call of inflate), the parameter flush should be set to Z_FINISH. In
+ this case all pending input is processed and all pending output is flushed;
+ avail_out must be large enough to hold all the uncompressed data. (The size
+ of the uncompressed data may have been saved by the compressor for this
+ purpose.) The next operation on this stream must be inflateEnd to deallocate
+ the decompression state. The use of Z_FINISH is never required, but can be
+ used to inform inflate that a faster approach may be used for the single
+ inflate() call.
+
+ In this implementation, inflate() always flushes as much output as
+ possible to the output buffer, and always uses the faster approach on the
+ first call. So the only effect of the flush parameter in this implementation
+ is on the return value of inflate(), as noted below, or when it returns early
+ because Z_BLOCK or Z_TREES is used.
+
+ If a preset dictionary is needed after this call (see inflateSetDictionary
+ below), inflate sets strm->adler to the adler32 checksum of the dictionary
+ chosen by the compressor and returns Z_NEED_DICT; otherwise it sets
+ strm->adler to the adler32 checksum of all output produced so far (that is,
+ total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described
+ below. At the end of the stream, inflate() checks that its computed adler32
+ checksum is equal to that saved by the compressor and returns Z_STREAM_END
+ only if the checksum is correct.
+
+ inflate() can decompress and check either zlib-wrapped or gzip-wrapped
+ deflate data. The header type is detected automatically, if requested when
+ initializing with inflateInit2(). Any information contained in the gzip
+ header is not retained, so applications that need that information should
+ instead use raw inflate, see inflateInit2() below, or inflateBack() and
+ perform their own processing of the gzip header and trailer.
+
+ inflate() returns Z_OK if some progress has been made (more input processed
+ or more output produced), Z_STREAM_END if the end of the compressed data has
+ been reached and all uncompressed output has been produced, Z_NEED_DICT if a
+ preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
+ corrupted (input stream not conforming to the zlib format or incorrect check
+ value), Z_STREAM_ERROR if the stream structure was inconsistent (for example
+ next_in or next_out was Z_NULL), Z_MEM_ERROR if there was not enough memory,
+ Z_BUF_ERROR if no progress is possible or if there was not enough room in the
+ output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and
+ inflate() can be called again with more input and more output space to
+ continue decompressing. If Z_DATA_ERROR is returned, the application may
+ then call inflateSync() to look for a good compression block if a partial
+ recovery of the data is desired.
+*/
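+
+/*
+   A sketch of a buffered decompression loop in the spirit of the zpipe.c
+   example distributed with zlib (illustrative only; source and dest are
+   assumed to be open stdio FILE pointers):
+
+     #define CHUNK 16384
+     unsigned char in[CHUNK], out[CHUNK];
+     z_stream strm = {0};                    // zalloc/zfree/opaque = Z_NULL
+     int ret = inflateInit(&strm);
+     if (ret != Z_OK) return ret;
+     do {
+         strm.avail_in = (uInt)fread(in, 1, CHUNK, source);
+         if (strm.avail_in == 0) break;      // input exhausted (or truncated)
+         strm.next_in = in;
+         do {                                // run inflate() until output dry
+             strm.avail_out = CHUNK;
+             strm.next_out = out;
+             ret = inflate(&strm, Z_NO_FLUSH);
+             switch (ret) {
+             case Z_NEED_DICT:
+                 ret = Z_DATA_ERROR;         // and fall through
+             case Z_DATA_ERROR:
+             case Z_MEM_ERROR:
+             case Z_STREAM_ERROR:
+                 (void)inflateEnd(&strm);
+                 return ret;
+             }
+             fwrite(out, 1, CHUNK - strm.avail_out, dest);
+         } while (strm.avail_out == 0);
+     } while (ret != Z_STREAM_END);
+     (void)inflateEnd(&strm);
+     return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
+*/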
+
+
+ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any pending
+ output.
+
+ inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
+ was inconsistent. In the error case, msg may be set but then points to a
+ static string (which must not be deallocated).
+*/
+
+
+ /* Advanced functions */
+
+/*
+ The following functions are needed only in some special applications.
+*/
+
+/*
+ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm,
+ int level,
+ int method,
+ int windowBits,
+ int memLevel,
+ int strategy));
+
+ This is another version of deflateInit with more compression options. The
+ fields next_in, zalloc, zfree and opaque must be initialized before by the
+ caller.
+
+ The method parameter is the compression method. It must be Z_DEFLATED in
+ this version of the library.
+
+ The windowBits parameter is the base two logarithm of the window size
+ (the size of the history buffer). It should be in the range 8..15 for this
+ version of the library. Larger values of this parameter result in better
+ compression at the expense of memory usage. The default value is 15 if
+ deflateInit is used instead.
+
+ windowBits can also be -8..-15 for raw deflate. In this case, -windowBits
+ determines the window size. deflate() will then generate raw deflate data
+ with no zlib header or trailer, and will not compute an adler32 check value.
+
+ windowBits can also be greater than 15 for optional gzip encoding. Add
+ 16 to windowBits to write a simple gzip header and trailer around the
+ compressed data instead of a zlib wrapper. The gzip header will have no
+ file name, no extra data, no comment, no modification time (set to zero), no
+ header crc, and the operating system will be set to 255 (unknown). If a
+ gzip stream is being written, strm->adler is a crc32 instead of an adler32.
+
+ The memLevel parameter specifies how much memory should be allocated
+ for the internal compression state. memLevel=1 uses minimum memory but is
+ slow and reduces compression ratio; memLevel=9 uses maximum memory for
+ optimal speed. The default value is 8. See zconf.h for total memory usage
+ as a function of windowBits and memLevel.
+
+ The strategy parameter is used to tune the compression algorithm. Use the
+ value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
+ filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no
+ string match), or Z_RLE to limit match distances to one (run-length
+ encoding). Filtered data consists mostly of small values with a somewhat
+ random distribution. In this case, the compression algorithm is tuned to
+ compress them better. The effect of Z_FILTERED is to force more Huffman
+ coding and less string matching; it is somewhat intermediate between
+ Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as
+ fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The
+ strategy parameter only affects the compression ratio but not the
+ correctness of the compressed output even if it is not set appropriately.
+ Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler
+ decoder for special applications.
+
+ deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid
+ method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is
+ incompatible with the version assumed by the caller (ZLIB_VERSION). msg is
+ set to null if there is no error message. deflateInit2 does not perform any
+ compression: this will be done by deflate().
+*/
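+
+/*
+   An illustrative call (a sketch, not a complete program) requesting a gzip
+   wrapper by adding 16 to windowBits, with the default memLevel of 8:
+
+     ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
+                        15 + 16,             // 32K window plus gzip wrapper
+                        8, Z_DEFAULT_STRATEGY);
+
+   With this setup strm.adler holds a crc32 rather than an adler32, as noted
+   above.
+*/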
+
+ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the compression dictionary from the given byte sequence
+ without producing any compressed output. This function must be called
+ immediately after deflateInit, deflateInit2 or deflateReset, before any call
+ of deflate. The compressor and decompressor must use exactly the same
+ dictionary (see inflateSetDictionary).
+
+ The dictionary should consist of strings (byte sequences) that are likely
+ to be encountered later in the data to be compressed, with the most commonly
+ used strings preferably put towards the end of the dictionary. Using a
+ dictionary is most useful when the data to be compressed is short and can be
+ predicted with good accuracy; the data can then be compressed better than
+ with the default empty dictionary.
+
+ Depending on the size of the compression data structures selected by
+ deflateInit or deflateInit2, a part of the dictionary may in effect be
+ discarded, for example if the dictionary is larger than the window size
+ provided in deflateInit or deflateInit2. Thus the strings most likely to be
+ useful should be put at the end of the dictionary, not at the front. In
+ addition, the current implementation of deflate will use at most the window
+ size minus 262 bytes of the provided dictionary.
+
+ Upon return of this function, strm->adler is set to the adler32 value
+ of the dictionary; the decompressor may later use this value to determine
+ which dictionary has been used by the compressor. (The adler32 value
+ applies to the whole dictionary even if only a subset of the dictionary is
+ actually used by the compressor.) If a raw deflate was requested, then the
+ adler32 value is not computed and strm->adler is not set.
+
+ deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
+ parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is
+ inconsistent (for example if deflate has already been called for this
+ stream). deflateSetDictionary does not perform any compression: this will
+ be done by deflate().
+*/
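+
+/*
+   Illustrative use (the dictionary contents here are hypothetical); the
+   identical bytes must later be handed to inflateSetDictionary() when
+   inflate() reports Z_NEED_DICT:
+
+     static const Bytef dict[] = "commonly repeated phrases";
+     ret = deflateSetDictionary(&strm, dict, sizeof(dict) - 1);
+     // on Z_OK, strm.adler identifies this dictionary to the decompressor
+*/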
+
+ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest,
+ z_streamp source));
+/*
+ Sets the destination stream as a complete copy of the source stream.
+
+ This function can be useful when several compression strategies will be
+ tried, for example when there are several ways of pre-processing the input
+ data with a filter. The streams that will be discarded should then be freed
+ by calling deflateEnd. Note that deflateCopy duplicates the internal
+ compression state which can be quite large, so this strategy is slow and can
+ consume lots of memory.
+
+ deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
+ (such as zalloc being Z_NULL). msg is left unchanged in both source and
+ destination.
+*/
+
+ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to deflateEnd followed by deflateInit,
+ but does not free and reallocate all the internal compression state. The
+ stream will keep the same compression level and any other attributes that
+ may have been set by deflateInit2.
+
+ deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being Z_NULL).
+*/
+
+ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm,
+ int level,
+ int strategy));
+/*
+ Dynamically update the compression level and compression strategy. The
+ interpretation of level and strategy is as in deflateInit2. This can be
+ used to switch between compression and straight copy of the input data, or
+ to switch to a different kind of input data requiring a different strategy.
+ If the compression level is changed, the input available so far is
+ compressed with the old level (and may be flushed); the new level will take
+ effect only at the next call of deflate().
+
+ Before the call of deflateParams, the stream state must be set as for
+ a call of deflate(), since the currently available input may have to be
+ compressed and flushed. In particular, strm->avail_out must be non-zero.
+
+ deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
+ stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR if
+ strm->avail_out was zero.
+*/
+
+ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm,
+ int good_length,
+ int max_lazy,
+ int nice_length,
+ int max_chain));
+/*
+ Fine tune deflate's internal compression parameters. This should only be
+ used by someone who understands the algorithm used by zlib's deflate for
+ searching for the best matching string, and even then only by the most
+ fanatic optimizer trying to squeeze out the last compressed bit for their
+ specific input data. Read the deflate.c source code for the meaning of the
+ max_lazy, good_length, nice_length, and max_chain parameters.
+
+ deflateTune() can be called after deflateInit() or deflateInit2(), and
+ returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream.
+ */
+
+ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm,
+ uLong sourceLen));
+/*
+ deflateBound() returns an upper bound on the compressed size after
+ deflation of sourceLen bytes. It must be called after deflateInit() or
+ deflateInit2(), and after deflateSetHeader(), if used. This would be used
+ to allocate an output buffer for deflation in a single pass, and so would be
+ called before deflate().
+*/
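+
+/*
+   A typical sizing pattern (sketch; malloc() failure handling omitted and
+   src_len is a placeholder):
+
+     uLong bound = deflateBound(&strm, src_len);
+     Bytef *dst = (Bytef *)malloc(bound);
+     strm.next_out  = dst;
+     strm.avail_out = (uInt)bound;
+     // deflate(&strm, Z_FINISH) can now complete in a single call
+*/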
+
+ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm,
+ int bits,
+ int value));
+/*
+ deflatePrime() inserts bits in the deflate output stream. The intent
+ is that this function is used to start off the deflate output with the bits
+ leftover from a previous deflate stream when appending to it. As such, this
+ function can only be used for raw deflate, and must be used before the first
+ deflate() call after a deflateInit2() or deflateReset(). bits must be less
+ than or equal to 16, and that many of the least significant bits of value
+ will be inserted in the output.
+
+ deflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent.
+*/
+
+ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm,
+ gz_headerp head));
+/*
+ deflateSetHeader() provides gzip header information for when a gzip
+ stream is requested by deflateInit2(). deflateSetHeader() may be called
+ after deflateInit2() or deflateReset() and before the first call of
+ deflate(). The text, time, os, extra field, name, and comment information
+ in the provided gz_header structure are written to the gzip header (xflag is
+ ignored -- the extra flags are set according to the compression level). The
+ caller must assure that, if not Z_NULL, name and comment are terminated with
+ a zero byte, and that if extra is not Z_NULL, that extra_len bytes are
+ available there. If hcrc is true, a gzip header crc is included. Note that
+ current versions of the command-line gzip tool (up through version 1.3.x)
+ do not support header CRCs, and will report that it is a "multi-part gzip
+ file" and give up.
+
+ If deflateSetHeader is not used, the default gzip header has text false,
+ the time set to zero, and os set to 255, with no extra, name, or comment
+ fields. The gzip header is returned to the default state by deflateReset().
+
+ deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent.
+*/
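+
+/*
+   An illustrative header setup (field values are hypothetical; <time.h> is
+   assumed for time()):
+
+     gz_header head = {0};                 // all other fields zero / Z_NULL
+     head.text = 1;                        // content believed to be text
+     head.time = (uLong)time(NULL);        // modification time
+     head.os   = 3;                        // 3 = Unix in RFC 1952
+     head.name = (Bytef *)"data.txt";      // zero-terminated name
+     ret = deflateSetHeader(&strm, &head);
+*/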
+
+/*
+ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm,
+ int windowBits));
+
+ This is another version of inflateInit with an extra parameter. The
+ fields next_in, avail_in, zalloc, zfree and opaque must be initialized
+ before by the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library. The default value is 15 if inflateInit is used
+ instead. windowBits must be greater than or equal to the windowBits value
+ provided to deflateInit2() while compressing, or it must be equal to 15 if
+ deflateInit2() was not used. If a compressed stream with a larger window
+ size is given as input, inflate() will return with the error code
+ Z_DATA_ERROR instead of trying to allocate a larger window.
+
+ windowBits can also be zero to request that inflate use the window size in
+ the zlib header of the compressed stream.
+
+ windowBits can also be -8..-15 for raw inflate. In this case, -windowBits
+ determines the window size. inflate() will then process raw deflate data,
+ not looking for a zlib or gzip header, not generating a check value, and not
+ looking for any check values for comparison at the end of the stream. This
+ is for use with other formats that use the deflate compressed data format
+ such as zip. Those formats provide their own check values. If a custom
+ format is developed using the raw deflate format for compressed data, it is
+ recommended that a check value such as an adler32 or a crc32 be applied to
+ the uncompressed data as is done in the zlib, gzip, and zip formats. For
+ most applications, the zlib format should be used as is. Note that the
+ comments above on the use of windowBits in deflateInit2() apply to its
+ magnitude here as well.
+
+ windowBits can also be greater than 15 for optional gzip decoding. Add
+ 32 to windowBits to enable zlib and gzip decoding with automatic header
+ detection, or add 16 to decode only the gzip format (the zlib format will
+ return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a
+ crc32 instead of an adler32.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
+ version assumed by the caller, or Z_STREAM_ERROR if the parameters are
+ invalid, such as a null pointer to the structure. msg is set to null if
+ there is no error message. inflateInit2 does not perform any decompression
+ apart from possibly reading the zlib header if present: actual decompression
+ will be done by inflate(). (So next_in and avail_in may be modified, but
+ next_out and avail_out are unused and unchanged.) The current implementation
+ of inflateInit2() does not process any header information -- that is
+ deferred until inflate() is called.
+*/
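+
+/*
+   An illustrative call (sketch only) enabling automatic zlib/gzip header
+   detection by adding 32 to windowBits:
+
+     ret = inflateInit2(&strm, 15 + 32);   // 32K window, zlib or gzip input
+*/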
+
+ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the decompression dictionary from the given uncompressed byte
+ sequence. This function must be called immediately after a call of inflate,
+ if that call returned Z_NEED_DICT. The dictionary chosen by the compressor
+ can be determined from the adler32 value returned by that call of inflate.
+ The compressor and decompressor must use exactly the same dictionary (see
+ deflateSetDictionary). For raw inflate, this function can be called
+ immediately after inflateInit2() or inflateReset() and before any call of
+ inflate() to set the dictionary. The application must ensure that the
+ dictionary that was used for compression is provided.
+
+ inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
+ parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is
+ inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
+ expected one (incorrect adler32 value). inflateSetDictionary does not
+ perform any decompression: this will be done by subsequent calls of
+ inflate().
+*/
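+
+/*
+   Illustrative handling of Z_NEED_DICT (dict and dict_len are assumed to
+   hold the same bytes given to deflateSetDictionary on the other side):
+
+     ret = inflate(&strm, Z_NO_FLUSH);
+     if (ret == Z_NEED_DICT) {
+         // strm.adler identifies which dictionary the compressor used
+         ret = inflateSetDictionary(&strm, dict, dict_len);
+         if (ret == Z_OK)
+             ret = inflate(&strm, Z_NO_FLUSH);   // resume decompression
+     }
+*/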
+
+ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm));
+/*
+ Skips invalid compressed data until a full flush point (see above the
+ description of deflate with Z_FULL_FLUSH) can be found, or until all
+ available input is skipped. No output is provided.
+
+ inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
+ if no more input was provided, Z_DATA_ERROR if no flush point has been
+ found, or Z_STREAM_ERROR if the stream structure was inconsistent. In the
+ success case, the application may save the current value of total_in,
+ which indicates where valid compressed data was found. In the error case,
+ the application may repeatedly call inflateSync, providing more input each
+ time, until success or end of the input data.
+*/
+
+ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest,
+ z_streamp source));
+/*
+ Sets the destination stream as a complete copy of the source stream.
+
+ This function can be useful when randomly accessing a large stream. The
+ first pass through the stream can periodically record the inflate state,
+ allowing restarting inflate at those points when randomly accessing the
+ stream.
+
+ inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
+ (such as zalloc being Z_NULL). msg is left unchanged in both source and
+ destination.
+*/
+
+ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state. The
+ stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being Z_NULL).
+*/
+
+ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm,
+ int windowBits));
+/*
+ This function is the same as inflateReset, but it also permits changing
+ the wrap and window size requests. The windowBits parameter is interpreted
+ the same as it is for inflateInit2.
+
+ inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being Z_NULL), or if
+ the windowBits parameter is invalid.
+*/
+
+ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm,
+ int bits,
+ int value));
+/*
+ This function inserts bits in the inflate input stream. The intent is
+ that this function is used to start inflating at a bit position in the
+ middle of a byte. The provided bits will be used before any bytes are used
+ from next_in. This function should only be used with raw inflate, and
+ should be used before the first inflate() call after inflateInit2() or
+ inflateReset(). bits must be less than or equal to 16, and that many of the
+ least significant bits of value will be inserted in the input.
+
+ If bits is negative, then the input stream bit buffer is emptied. Then
+ inflatePrime() can be called again to put bits in the buffer. This is used
+ to clear out bits leftover after feeding inflate a block description prior
+ to feeding inflate codes.
+
+ inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent.
+*/
+
+ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm));
+/*
+ This function returns two values, one in the lower 16 bits of the return
+ value, and the other in the remaining upper bits, obtained by shifting the
+ return value down 16 bits. If the upper value is -1 and the lower value is
+ zero, then inflate() is currently decoding information outside of a block.
+ If the upper value is -1 and the lower value is non-zero, then inflate is in
+ the middle of a stored block, with the lower value equaling the number of
+ bytes from the input remaining to copy. If the upper value is not -1, then
+ it is the number of bits back from the current bit position in the input of
+ the code (literal or length/distance pair) currently being processed. In
+ that case the lower value is the number of bytes already emitted for that
+ code.
+
+ A code is being processed if inflate is waiting for more input to complete
+ decoding of the code, or if it has completed decoding but is waiting for
+ more output space to write the literal or match data.
+
+ inflateMark() is used to mark locations in the input data for random
+ access, which may be at bit positions, and to note those cases where the
+ output of a code may span boundaries of random access blocks. The current
+ location in the input stream can be determined from avail_in and data_type
+ as noted in the description for the Z_BLOCK flush parameter for inflate.
+
+ inflateMark returns the value noted above or -1 << 16 if the provided
+ source stream state was inconsistent.
+*/
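+
+/*
+   Unpacking the return value (a sketch of the arithmetic described above):
+
+     long mark  = inflateMark(&strm);
+     long upper = mark >> 16;        // -1, or bits back to the current code
+     long lower = mark & 0xffff;     // bytes to copy or bytes emitted
+*/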
+
+ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm,
+ gz_headerp head));
+/*
+ inflateGetHeader() requests that gzip header information be stored in the
+ provided gz_header structure. inflateGetHeader() may be called after
+ inflateInit2() or inflateReset(), and before the first call of inflate().
+ As inflate() processes the gzip stream, head->done is zero until the header
+ is completed, at which time head->done is set to one. If a zlib stream is
+ being decoded, then head->done is set to -1 to indicate that there will be
+ no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be
+ used to force inflate() to return immediately after header processing is
+ complete and before any actual data is decompressed.
+
+ The text, time, xflags, and os fields are filled in with the gzip header
+ contents. hcrc is set to true if there is a header CRC. (The header CRC
+ was valid if done is set to one.) If extra is not Z_NULL, then extra_max
+ contains the maximum number of bytes to write to extra. Once done is true,
+ extra_len contains the actual extra field length, and extra contains the
+ extra field, or that field truncated if extra_max is less than extra_len.
+ If name is not Z_NULL, then up to name_max characters are written there,
+ terminated with a zero unless the length is greater than name_max. If
+ comment is not Z_NULL, then up to comm_max characters are written there,
+ terminated with a zero unless the length is greater than comm_max. When any
+ of extra, name, or comment are not Z_NULL and the respective field is not
+ present in the header, then that field is set to Z_NULL to signal its
+ absence. This allows the use of deflateSetHeader() with the returned
+ structure to duplicate the header. However if those fields are set to
+ allocated memory, then the application will need to save those pointers
+ elsewhere so that they can be eventually freed.
+
+ If inflateGetHeader is not used, then the header information is simply
+ discarded. The header is always checked for validity, including the header
+ CRC if present. inflateReset() will reset the process to discard the header
+ information. The application would need to call inflateGetHeader() again to
+ retrieve the header from the next gzip stream.
+
+ inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent.
+*/
+
+/*
+ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits,
+ unsigned char FAR *window));
+
+ Initialize the internal stream state for decompression using inflateBack()
+ calls. The fields zalloc, zfree and opaque in strm must be initialized
+ before the call. If zalloc and zfree are Z_NULL, then the default library-
+ derived memory allocation routines are used. windowBits is the base two
+ logarithm of the window size, in the range 8..15. window is a caller
+ supplied buffer of that size. Except for special applications where it is
+ assured that deflate was used with small window sizes, windowBits must be 15
+ and a 32K byte window must be supplied to be able to decompress general
+ deflate streams.
+
+ See inflateBack() for the usage of these routines.
+
+ inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of
+ the parameters are invalid, Z_MEM_ERROR if the internal state could not be
+ allocated, or Z_VERSION_ERROR if the version of the library does not match
+ the version of the header file.
+*/
+
+typedef unsigned (*in_func) OF((void FAR *, unsigned char FAR * FAR *));
+typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned));
+
+ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm,
+ in_func in, void FAR *in_desc,
+ out_func out, void FAR *out_desc));
+/*
+ inflateBack() does a raw inflate with a single call using a call-back
+ interface for input and output. This is more efficient than inflate() for
+ file i/o applications in that it avoids copying between the output and the
+ sliding window by simply making the window itself the output buffer. This
+ function trusts the application to not change the output buffer passed by
+ the output function, at least until inflateBack() returns.
+
+ inflateBackInit() must be called first to allocate the internal state
+ and to initialize the state with the user-provided window buffer.
+ inflateBack() may then be used multiple times to inflate a complete, raw
+ deflate stream with each call. inflateBackEnd() is then called to free the
+ allocated state.
+
+ A raw deflate stream is one with no zlib or gzip header or trailer.
+ This routine would normally be used in a utility that reads zip or gzip
+ files and writes out uncompressed files. The utility would decode the
+ header and process the trailer on its own, hence this routine expects only
+ the raw deflate stream to decompress. This is different from the normal
+ behavior of inflate(), which expects either a zlib or gzip header and
+ trailer around the deflate stream.
+
+ inflateBack() uses two subroutines supplied by the caller that are then
+ called by inflateBack() for input and output. inflateBack() calls those
+ routines until it reads a complete deflate stream and writes out all of the
+ uncompressed data, or until it encounters an error. The function's
+ parameters and return types are defined above in the in_func and out_func
+ typedefs. inflateBack() will call in(in_desc, &buf) which should return the
+ number of bytes of provided input, and a pointer to that input in buf. If
+ there is no input available, in() must return zero--buf is ignored in that
+ case--and inflateBack() will return a buffer error. inflateBack() will call
+ out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. out()
+ should return zero on success, or non-zero on failure. If out() returns
+ non-zero, inflateBack() will return with an error. Neither in() nor out()
+ are permitted to change the contents of the window provided to
+ inflateBackInit(), which is also the buffer that out() uses to write from.
+ The length written by out() will be at most the window size. Any non-zero
+ amount of input may be provided by in().
+
+ For convenience, inflateBack() can be provided input on the first call by
+ setting strm->next_in and strm->avail_in. If that input is exhausted, then
+ in() will be called. Therefore strm->next_in must be initialized before
+ calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called
+ immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in
+ must also be initialized, and then if strm->avail_in is not zero, input will
+ initially be taken from strm->next_in[0 .. strm->avail_in - 1].
+
+ The in_desc and out_desc parameters of inflateBack() are passed as the
+ first parameter of in() and out() respectively when they are called. These
+ descriptors can be optionally used to pass any information that the caller-
+ supplied in() and out() functions need to do their job.
+
+ On return, inflateBack() will set strm->next_in and strm->avail_in to
+ pass back any unused input that was provided by the last in() call. The
+ return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR
+ if in() or out() returned an error, Z_DATA_ERROR if there was a format error
+ in the deflate stream (in which case strm->msg is set to indicate the nature
+ of the error), or Z_STREAM_ERROR if the stream was not properly initialized.
+ In the case of Z_BUF_ERROR, an input or output error can be distinguished
+ using strm->next_in which will be Z_NULL only if in() returned an error. If
+ strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning
+ non-zero. (in() will always be called before out(), so strm->next_in is
+ assured to be defined if out() returns non-zero.) Note that inflateBack()
+ cannot return Z_OK.
+*/
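+
+/*
+   A sketch of caller-supplied callbacks backed by stdio (illustrative only;
+   the descriptor arguments are assumed to be FILE pointers and the names
+   in_cb/out_cb are arbitrary):
+
+     static unsigned in_cb(void FAR *desc, unsigned char FAR * FAR *buf)
+     {
+         static unsigned char hold[16384];
+         *buf = hold;                // hand zlib a pointer to our input
+         return (unsigned)fread(hold, 1, sizeof(hold), (FILE *)desc);
+     }
+
+     static int out_cb(void FAR *desc, unsigned char FAR *buf, unsigned len)
+     {
+         return fwrite(buf, 1, len, (FILE *)desc) != len;  // non-zero = error
+     }
+
+     unsigned char window[32768];    // 32K window for general deflate streams
+     z_stream strm = {0};            // zalloc/zfree/opaque = Z_NULL, and
+                                     // next_in = Z_NULL: in() called at once
+     ret = inflateBackInit(&strm, 15, window);
+     if (ret == Z_OK)
+         ret = inflateBack(&strm, in_cb, infile, out_cb, outfile);
+     inflateBackEnd(&strm);          // expect ret == Z_STREAM_END on success
+*/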
+
+ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm));
+/*
+ All memory allocated by inflateBackInit() is freed.
+
+ inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream
+ state was inconsistent.
+*/
+
+ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void));
+/* Return flags indicating compile-time options.
+
+ Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other:
+ 1.0: size of uInt
+ 3.2: size of uLong
+ 5.4: size of voidpf (pointer)
+ 7.6: size of z_off_t
+
+ Compiler, assembler, and debug options:
+ 8: DEBUG
+ 9: ASMV or ASMINF -- use ASM code
+ 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention
+ 11: 0 (reserved)
+
+ One-time table building (smaller code, but not thread-safe if true):
+ 12: BUILDFIXED -- build static block decoding tables when needed
+ 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed
+ 14,15: 0 (reserved)
+
+ Library content (indicates missing functionality):
+ 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking
+ deflate code when not needed)
+ 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect
+ and decode gzip streams (to avoid linking crc code)
+ 18-19: 0 (reserved)
+
+ Operation variations (changes in library functionality):
+ 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate
+ 21: FASTEST -- deflate algorithm with only one, lowest compression level
+ 22,23: 0 (reserved)
+
+ The sprintf variant used by gzprintf (zero is best):
+ 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format
+ 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure!
+ 26: 0 = returns value, 1 = void -- 1 means inferred string length returned
+
+ Remainder:
+ 27-31: 0 (reserved)
+ */
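+
+/*
+   For example (sketch), reading the size class of uInt from the low two
+   bits and testing bit 17 (NO_GZIP):
+
+     uLong flags = zlibCompileFlags();
+     int uint_size = (int)(flags & 3);          // 0=16, 1=32, 2=64, 3=other
+     int no_gzip   = (int)((flags >> 17) & 1);  // 1: gzip support compiled out
+*/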
+
+
+ /* utility functions */
+
+/*
+ The following utility functions are implemented on top of the basic
+ stream-oriented functions. To simplify the interface, some default options
+ are assumed (compression level and memory usage, standard memory allocation
+ functions). The source code of these utility functions can be modified if
+ you need special options.
+*/
+
+ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen));
+/*
+ Compresses the source buffer into the destination buffer. sourceLen is
+ the byte length of the source buffer. Upon entry, destLen is the total size
+ of the destination buffer, which must be at least the value returned by
+ compressBound(sourceLen). Upon exit, destLen is the actual size of the
+ compressed buffer.
+
+ compress returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if there was not enough room in the output
+ buffer.
+*/
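+
+/*
+   Typical use (sketch; <stdlib.h>/<string.h> assumed and malloc() checking
+   omitted):
+
+     uLong  src_len  = (uLong)strlen(text);
+     uLongf dest_len = compressBound(src_len);
+     Bytef *dest = (Bytef *)malloc(dest_len);
+     int ret = compress(dest, &dest_len, (const Bytef *)text, src_len);
+     // on Z_OK, dest_len has been reduced to the actual compressed size
+*/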
+
+ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen,
+ int level));
+/*
+ Compresses the source buffer into the destination buffer. The level
+ parameter has the same meaning as in deflateInit. sourceLen is the byte
+ length of the source buffer. Upon entry, destLen is the total size of the
+ destination buffer, which must be at least the value returned by
+ compressBound(sourceLen). Upon exit, destLen is the actual size of the
+ compressed buffer.
+
+ compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_BUF_ERROR if there was not enough room in the output buffer,
+ Z_STREAM_ERROR if the level parameter is invalid.
+*/
+
+ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen));
+/*
+ compressBound() returns an upper bound on the compressed size after
+ compress() or compress2() on sourceLen bytes. It would be used before a
+ compress() or compress2() call to allocate the destination buffer.
+*/
+
+ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen));
+/*
+ Decompresses the source buffer into the destination buffer. sourceLen is
+ the byte length of the source buffer. Upon entry, destLen is the total size
+ of the destination buffer, which must be large enough to hold the entire
+ uncompressed data. (The size of the uncompressed data must have been saved
+ previously by the compressor and transmitted to the decompressor by some
+ mechanism outside the scope of this compression library.) Upon exit, destLen
+ is the actual size of the uncompressed buffer.
+
+ uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if there was not enough room in the output
+ buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete.
+*/
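+
+/*
+ A minimal round-trip sketch using the utility functions above. Error
+ handling is abbreviated; src, srcLen, dest, and handle_error() are
+ placeholders, and dest must be allocated large enough for the original
+ data, whose size is conveyed outside the library as noted above:
+
+     uLongf compLen = compressBound(srcLen);
+     Bytef *comp = (Bytef *)malloc(compLen);
+     if (comp == NULL || compress(comp, &compLen, src, srcLen) != Z_OK)
+         handle_error();
+
+     uLongf destLen = srcLen;
+     if (uncompress(dest, &destLen, comp, compLen) != Z_OK)
+         handle_error();
+*/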
+
+
+ /* gzip file access functions */
+
+/*
+ This library supports reading and writing files in gzip (.gz) format with
+ an interface similar to that of stdio, using the functions that start with
+ "gz". The gzip format is different from the zlib format. gzip is a gzip
+ wrapper, documented in RFC 1952, wrapped around a deflate stream.
+*/
+
+typedef voidp gzFile; /* opaque gzip file descriptor */
+
+/*
+ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode));
+
+ Opens a gzip (.gz) file for reading or writing. The mode parameter is as
+ in fopen ("rb" or "wb") but can also include a compression level ("wb9") or
+ a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only
+ compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F'
+ for fixed code compression as in "wb9F". (See the description of
+ deflateInit2 for more information about the strategy parameter.) Also "a"
+ can be used instead of "w" to request that the gzip stream that will be
+ written be appended to the file. "+" will result in an error, since reading
+ and writing to the same gzip file is not supported.
+
+ gzopen can be used to read a file which is not in gzip format; in this
+ case gzread will directly read from the file without decompression.
+
+ gzopen returns NULL if the file could not be opened, if there was
+ insufficient memory to allocate the gzFile state, or if an invalid mode was
+ specified (an 'r', 'w', or 'a' was not provided, or '+' was provided).
+ errno can be checked to determine if the reason gzopen failed was that the
+ file could not be opened.
+*/
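+
+/*
+ For example, opening a file for writing at maximum compression (a
+ sketch; handle_error() is a placeholder):
+
+     gzFile out = gzopen("data.gz", "wb9");
+     if (out == NULL)
+         handle_error();
+     gzputs(out, "hello, gzip\n");
+     gzclose(out);
+*/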
+
+ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode));
+/*
+ gzdopen associates a gzFile with the file descriptor fd. File descriptors
+ are obtained from calls like open, dup, creat, pipe or fileno (if the file
+ has been previously opened with fopen). The mode parameter is as in gzopen.
+
+ The next call of gzclose on the returned gzFile will also close the file
+ descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor
+ fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd,
+ mode);. The duplicated descriptor should be saved to avoid a leak, since
+ gzdopen does not close fd if it fails.
+
+ gzdopen returns NULL if there was insufficient memory to allocate the
+ gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not
+ provided, or '+' was provided), or if fd is -1. The file descriptor is not
+ used until the next gz* read, write, seek, or close operation, so gzdopen
+ will not detect if fd is invalid (unless fd is -1).
+*/
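+
+/*
+ A sketch of the keep-fd-open pattern described above, using the POSIX
+ dup() and close() calls (fd_keep is a placeholder descriptor). Since
+ gzdopen() does not close fd on failure, the caller does so:
+
+     int fd = dup(fd_keep);
+     gzFile gz = gzdopen(fd, "rb");
+     if (gz == NULL && fd != -1)
+         close(fd);
+*/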
+
+ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size));
+/*
+ Set the internal buffer size used by this library's functions. The
+ default buffer size is 8192 bytes. This function must be called after
+ gzopen() or gzdopen(), and before any other calls that read or write the
+ file. The buffer memory allocation is always deferred to the first read or
+ write. Two buffers are allocated, either both of the specified size when
+ writing, or one of the specified size and the other twice that size when
+ reading. A larger buffer size of, for example, 64K or 128K bytes will
+ noticeably increase the speed of decompression (reading).
+
+ The new buffer size also affects the maximum length for gzprintf().
+
+ gzbuffer() returns 0 on success, or -1 on failure, such as being called
+ too late.
+*/
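+
+/*
+ A sketch of the intended call order: set the buffer size immediately
+ after opening and before any read or write (the 128K size here is only
+ an example):
+
+     gzFile in = gzopen("data.gz", "rb");
+     if (in != NULL)
+         gzbuffer(in, 131072);
+*/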
+
+ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy));
+/*
+ Dynamically update the compression level or strategy. See the description
+ of deflateInit2 for the meaning of these parameters.
+
+ gzsetparams returns Z_OK if success, or Z_STREAM_ERROR if the file was not
+ opened for writing.
+*/
+
+ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len));
+/*
+ Reads the given number of uncompressed bytes from the compressed file. If
+ the input file was not in gzip format, gzread copies the given number of
+ bytes into the buffer.
+
+ After reaching the end of a gzip stream in the input, gzread will continue
+ to read, looking for another gzip stream, or failing that, reading the rest
+ of the input file directly without decompression. The entire input file
+ will be read if gzread is called until it returns less than the requested
+ len.
+
+ gzread returns the number of uncompressed bytes actually read, less than
+ len for end of file, or -1 for error.
+*/
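+
+/*
+ A typical read loop (sketch; use_bytes() and handle_error() are
+ placeholders). Calling gzread() until it returns zero or less consumes
+ the entire input, per the note above, and -1 indicates an error:
+
+     unsigned char buf[16384];
+     int got;
+     while ((got = gzread(in, buf, sizeof(buf))) > 0)
+         use_bytes(buf, got);
+     if (got == -1)
+         handle_error();
+*/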
+
+ZEXTERN int ZEXPORT gzwrite OF((gzFile file,
+ voidpc buf, unsigned len));
+/*
+ Writes the given number of uncompressed bytes into the compressed file.
+ gzwrite returns the number of uncompressed bytes written or 0 in case of
+ error.
+*/
+
+ZEXTERN int ZEXPORTVA gzprintf OF((gzFile file, const char *format, ...));
+/*
+ Converts, formats, and writes the arguments to the compressed file under
+ control of the format string, as in fprintf. gzprintf returns the number of
+ uncompressed bytes actually written, or 0 in case of error. The number of
+ uncompressed bytes written is limited to 8191, or one less than the buffer
+ size given to gzbuffer(). The caller should assure that this limit is not
+ exceeded. If it is exceeded, then gzprintf() will return an error (0) with
+ nothing written. In this case, there may also be a buffer overflow with
+ unpredictable consequences, which is possible only if zlib was compiled with
+ the insecure functions sprintf() or vsprintf() because the secure snprintf()
+ or vsnprintf() functions were not available. This can be determined using
+ zlibCompileFlags().
+*/
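+
+/*
+ A sketch of the compile-flag check mentioned above: bit 25 of
+ zlibCompileFlags() is set when only the insecure sprintf()/vsprintf()
+ variants were available at compile time:
+
+     int insecure = (zlibCompileFlags() & (1L << 25)) != 0;
+
+ When insecure is nonzero, keep gzprintf() output well under the buffer
+ limit and avoid untrusted format arguments.
+*/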
+
+ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s));
+/*
+ Writes the given null-terminated string to the compressed file, excluding
+ the terminating null character.
+
+ gzputs returns the number of characters written, or -1 in case of error.
+*/
+
+ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len));
+/*
+ Reads bytes from the compressed file until len-1 characters are read, or a
+ newline character is read and transferred to buf, or an end-of-file
+ condition is encountered. If any characters are read or if len == 1, the
+ string is terminated with a null character. If no characters are read due
+ to an end-of-file or len < 1, then the buffer is left untouched.
+
+ gzgets returns buf which is a null-terminated string, or it returns NULL
+ for end-of-file or in case of error. If there was an error, the contents at
+ buf are indeterminate.
+*/
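+
+/*
+ Line-by-line reading sketch (process_line() is a placeholder). gzgets()
+ returns NULL for both end-of-file and error, so gzerror() below can be
+ used afterward to distinguish the two:
+
+     char line[1024];
+     while (gzgets(in, line, sizeof(line)) != NULL)
+         process_line(line);
+*/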
+
+ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c));
+/*
+ Writes c, converted to an unsigned char, into the compressed file. gzputc
+ returns the value that was written, or -1 in case of error.
+*/
+
+ZEXTERN int ZEXPORT gzgetc OF((gzFile file));
+/*
+ Reads one byte from the compressed file. gzgetc returns this byte or -1
+ in case of end of file or error.
+*/
+
+ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file));
+/*
+ Push one character back onto the stream to be read as the first character
+ on the next read. At least one character of push-back is allowed.
+ gzungetc() returns the character pushed, or -1 on failure. gzungetc() will
+ fail if c is -1, and may fail if a character has been pushed but not read
+ yet. If gzungetc is used immediately after gzopen or gzdopen, at least the
+ output buffer size of pushed characters is allowed. (See gzbuffer above.)
+ The pushed character will be discarded if the stream is repositioned with
+ gzseek() or gzrewind().
+*/
+
+ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush));
+/*
+ Flushes all pending output into the compressed file. The parameter flush
+ is as in the deflate() function. The return value is the zlib error number
+ (see function gzerror below). gzflush is only permitted when writing.
+
+ If the flush parameter is Z_FINISH, the remaining data is written and the
+ gzip stream is completed in the output. If gzwrite() is called again, a new
+ gzip stream will be started in the output. gzread() is able to read such
+ concatenated gzip streams.
+
+ gzflush should be called only when strictly necessary because it will
+ degrade compression if called too often.
+*/
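+
+/*
+ A sketch of completing one gzip stream and starting another in the same
+ file, as described above (part1, part2, len1, and len2 are
+ placeholders):
+
+     gzwrite(out, part1, len1);
+     gzflush(out, Z_FINISH);
+     gzwrite(out, part2, len2);
+
+ gzread() can decode the resulting concatenated streams.
+*/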
+
+/*
+ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file,
+ z_off_t offset, int whence));
+
+ Sets the starting position for the next gzread or gzwrite on the given
+ compressed file. The offset represents a number of bytes in the
+ uncompressed data stream. The whence parameter is defined as in lseek(2);
+ the value SEEK_END is not supported.
+
+ If the file is opened for reading, this function is emulated but can be
+ extremely slow. If the file is opened for writing, only forward seeks are
+ supported; gzseek then compresses a sequence of zeroes up to the new
+ starting position.
+
+ gzseek returns the resulting offset location as measured in bytes from
+ the beginning of the uncompressed stream, or -1 in case of error, in
+ particular if the file is opened for writing and the new starting position
+ would be before the current position.
+*/
+
+ZEXTERN int ZEXPORT gzrewind OF((gzFile file));
+/*
+ Rewinds the given file. This function is supported only for reading.
+
+ gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET)
+*/
+
+/*
+ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file));
+
+ Returns the starting position for the next gzread or gzwrite on the given
+ compressed file. This position represents a number of bytes in the
+ uncompressed data stream, and is zero when starting, even if appending or
+ reading a gzip stream from the middle of a file using gzdopen().
+
+ gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR)
+*/
+
+/*
+ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file));
+
+ Returns the current offset in the file being read or written. This offset
+ includes the count of bytes that precede the gzip stream, for example when
+ appending or when using gzdopen() for reading. When reading, the offset
+ does not include as yet unused buffered input. This information can be used
+ for a progress indicator. On error, gzoffset() returns -1.
+*/
+
+ZEXTERN int ZEXPORT gzeof OF((gzFile file));
+/*
+ Returns true (1) if the end-of-file indicator has been set while reading,
+ false (0) otherwise. Note that the end-of-file indicator is set only if the
+ read tried to go past the end of the input, but came up short. Therefore,
+ just like feof(), gzeof() may return false even if there is no more data to
+ read, in the event that the last read request was for the exact number of
+ bytes remaining in the input file. This will happen if the input file size
+ is an exact multiple of the buffer size.
+
+ If gzeof() returns true, then the read functions will return no more data,
+ unless the end-of-file indicator is reset by gzclearerr() and the input file
+ has grown since the previous end of file was detected.
+*/
+
+ZEXTERN int ZEXPORT gzdirect OF((gzFile file));
+/*
+ Returns true (1) if file is being copied directly while reading, or false
+ (0) if file is a gzip stream being decompressed. This state can change from
+ false to true while reading the input file if the end of a gzip stream is
+ reached, but is followed by data that is not another gzip stream.
+
+ If the input file is empty, gzdirect() will return true, since the input
+ does not contain a gzip stream.
+
+ If gzdirect() is used immediately after gzopen() or gzdopen() it will
+ cause buffers to be allocated to allow reading the file to determine if it
+ is a gzip file. Therefore if gzbuffer() is used, it should be called before
+ gzdirect().
+*/
+
+ZEXTERN int ZEXPORT gzclose OF((gzFile file));
+/*
+ Flushes all pending output if necessary, closes the compressed file and
+ deallocates the (de)compression state. Note that once file is closed, you
+ cannot call gzerror with file, since its structures have been deallocated.
+ gzclose must not be called more than once on the same file, just as free
+ must not be called more than once on the same allocation.
+
+ gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a
+ file operation error, or Z_OK on success.
+*/
+
+ZEXTERN int ZEXPORT gzclose_r OF((gzFile file));
+ZEXTERN int ZEXPORT gzclose_w OF((gzFile file));
+/*
+ Same as gzclose(), but gzclose_r() is only for use when reading, and
+ gzclose_w() is only for use when writing or appending. The advantage to
+ using these instead of gzclose() is that they avoid linking in zlib
+ compression or decompression code that is not used when only reading or only
+ writing respectively. If gzclose() is used, then both compression and
+ decompression code will be included in the application when linking to a
+ static
+ zlib library.
+*/
+
+ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum));
+/*
+ Returns the error message for the last error which occurred on the given
+ compressed file. errnum is set to zlib error number. If an error occurred
+ in the file system and not in the compression library, errnum is set to
+ Z_ERRNO and the application may consult errno to get the exact error code.
+
+ The application must not modify the returned string. Future calls to
+ this function may invalidate the previously returned string. If file is
+ closed, then the string previously returned by gzerror will no longer be
+ available.
+
+ gzerror() should be used to distinguish errors from end-of-file for those
+ functions above that do not distinguish those cases in their return values.
+*/
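+
+/*
+ A sketch of distinguishing a file-system error from a compression error
+ after a failed gz* call (file is the gzFile in question):
+
+     int errnum;
+     const char *msg = gzerror(file, &errnum);
+     if (errnum == Z_ERRNO)
+         perror("gz");
+     else if (errnum != Z_OK)
+         fprintf(stderr, "zlib: %s\n", msg);
+*/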
+
+ZEXTERN void ZEXPORT gzclearerr OF((gzFile file));
+/*
+ Clears the error and end-of-file flags for file. This is analogous to the
+ clearerr() function in stdio. This is useful for continuing to read a gzip
+ file that is being written concurrently.
+*/
+
+
+ /* checksum functions */
+
+/*
+ These functions are not related to compression but are exported
+ anyway because they might be useful in applications using the compression
+ library.
+*/
+
+ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len));
+/*
+ Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+ return the updated checksum. If buf is Z_NULL, this function returns the
+ required initial value for the checksum.
+
+ An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
+ much faster.
+
+ Usage example:
+
+ uLong adler = adler32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ adler = adler32(adler, buffer, length);
+ }
+ if (adler != original_adler) error();
+*/
+
+/*
+ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2,
+ z_off_t len2));
+
+ Combine two Adler-32 checksums into one. For two sequences of bytes, seq1
+ and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for
+ each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of
+ seq1 and seq2 concatenated, requiring only adler1, adler2, and len2.
+*/
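+
+/*
+ An illustration of the combine property (seq1, seq2, len1, and len2 are
+ placeholders); crc32_combine() below behaves the same way:
+
+     uLong a1 = adler32(adler32(0L, Z_NULL, 0), seq1, len1);
+     uLong a2 = adler32(adler32(0L, Z_NULL, 0), seq2, len2);
+     uLong a12 = adler32(a1, seq2, len2);
+
+ Here a12 is the checksum of seq1 followed by seq2, and
+ adler32_combine(a1, a2, len2) returns the same value as a12.
+*/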
+
+ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len));
+/*
+ Update a running CRC-32 with the bytes buf[0..len-1] and return the
+ updated CRC-32. If buf is Z_NULL, this function returns the required
+ initial value for the crc. Pre- and post-conditioning (one's
+ complement) is performed within this function so it shouldn't be done by the
+ application.
+
+ Usage example:
+
+ uLong crc = crc32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ crc = crc32(crc, buffer, length);
+ }
+ if (crc != original_crc) error();
+*/
+
+/*
+ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2));
+
+ Combine two CRC-32 check values into one. For two sequences of bytes,
+ seq1 and seq2 with lengths len1 and len2, CRC-32 check values were
+ calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32
+ check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and
+ len2.
+*/
+
+
+ /* various hacks, don't look :) */
+
+/* deflateInit and inflateInit are macros to allow checking the zlib version
+ * and the compiler's view of z_stream:
+ */
+ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method,
+ int windowBits, int memLevel,
+ int strategy, const char *version,
+ int stream_size));
+ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits,
+ unsigned char FAR *window,
+ const char *version,
+ int stream_size));
+#define deflateInit(strm, level) \
+ deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
+#define inflateInit(strm) \
+ inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
+#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
+ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
+ (strategy), ZLIB_VERSION, sizeof(z_stream))
+#define inflateInit2(strm, windowBits) \
+ inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
+#define inflateBackInit(strm, windowBits, window) \
+ inflateBackInit_((strm), (windowBits), (window), \
+ ZLIB_VERSION, sizeof(z_stream))
+
+/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or
+ * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if
+ * both are true, the application gets the *64 functions, and the regular
+ * functions are changed to 64 bits) -- in case these are set on systems
+ * without large file support, _LFS64_LARGEFILE must also be true
+ */
+#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0
+ ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
+ ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int));
+ ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile));
+ ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile));
+ ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t));
+ ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t));
+#endif
+
+#if !defined(ZLIB_INTERNAL) && _FILE_OFFSET_BITS-0 == 64 && _LFS64_LARGEFILE-0
+# define gzopen gzopen64
+# define gzseek gzseek64
+# define gztell gztell64
+# define gzoffset gzoffset64
+# define adler32_combine adler32_combine64
+# define crc32_combine crc32_combine64
+# ifdef _LARGEFILE64_SOURCE
+ ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
+ ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int));
+ ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile));
+ ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile));
+ ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t));
+ ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t));
+# endif
+#else
+ ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *));
+ ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int));
+ ZEXTERN z_off_t ZEXPORT gztell OF((gzFile));
+ ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile));
+ ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t));
+ ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t));
+#endif
+
+/* hack for buggy compilers */
+#if !defined(ZUTIL_H) && !defined(NO_DUMMY_DECL)
+ struct internal_state {int dummy;};
+#endif
+
+/* undocumented functions */
+ZEXTERN const char * ZEXPORT zError OF((int));
+ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp));
+ZEXTERN const uLongf * ZEXPORT get_crc_table OF((void));
+ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZLIB_H */
--- /dev/null
+/* zutil.c -- target dependent utility functions for the compression library
+ * Copyright (C) 1995-2005, 2010 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zutil.h"
+
+#ifndef NO_DUMMY_DECL
+struct internal_state {int dummy;}; /* for buggy compilers */
+#endif
+
+const char * const z_errmsg[10] = {
+"need dictionary", /* Z_NEED_DICT 2 */
+"stream end", /* Z_STREAM_END 1 */
+"", /* Z_OK 0 */
+"file error", /* Z_ERRNO (-1) */
+"stream error", /* Z_STREAM_ERROR (-2) */
+"data error", /* Z_DATA_ERROR (-3) */
+"insufficient memory", /* Z_MEM_ERROR (-4) */
+"buffer error", /* Z_BUF_ERROR (-5) */
+"incompatible version",/* Z_VERSION_ERROR (-6) */
+""};
+
+
+const char * ZEXPORT zlibVersion()
+{
+ return ZLIB_VERSION;
+}
+
+uLong ZEXPORT zlibCompileFlags()
+{
+ uLong flags;
+
+ flags = 0;
+ switch ((int)(sizeof(uInt))) {
+ case 2: break;
+ case 4: flags += 1; break;
+ case 8: flags += 2; break;
+ default: flags += 3;
+ }
+ switch ((int)(sizeof(uLong))) {
+ case 2: break;
+ case 4: flags += 1 << 2; break;
+ case 8: flags += 2 << 2; break;
+ default: flags += 3 << 2;
+ }
+ switch ((int)(sizeof(voidpf))) {
+ case 2: break;
+ case 4: flags += 1 << 4; break;
+ case 8: flags += 2 << 4; break;
+ default: flags += 3 << 4;
+ }
+ switch ((int)(sizeof(z_off_t))) {
+ case 2: break;
+ case 4: flags += 1 << 6; break;
+ case 8: flags += 2 << 6; break;
+ default: flags += 3 << 6;
+ }
+#ifdef DEBUG
+ flags += 1 << 8;
+#endif
+#if defined(ASMV) || defined(ASMINF)
+ flags += 1 << 9;
+#endif
+#ifdef ZLIB_WINAPI
+ flags += 1 << 10;
+#endif
+#ifdef BUILDFIXED
+ flags += 1 << 12;
+#endif
+#ifdef DYNAMIC_CRC_TABLE
+ flags += 1 << 13;
+#endif
+#ifdef NO_GZCOMPRESS
+ flags += 1L << 16;
+#endif
+#ifdef NO_GZIP
+ flags += 1L << 17;
+#endif
+#ifdef PKZIP_BUG_WORKAROUND
+ flags += 1L << 20;
+#endif
+#ifdef FASTEST
+ flags += 1L << 21;
+#endif
+#ifdef STDC
+# ifdef NO_vsnprintf
+ flags += 1L << 25;
+# ifdef HAS_vsprintf_void
+ flags += 1L << 26;
+# endif
+# else
+# ifdef HAS_vsnprintf_void
+ flags += 1L << 26;
+# endif
+# endif
+#else
+ flags += 1L << 24;
+# ifdef NO_snprintf
+ flags += 1L << 25;
+# ifdef HAS_sprintf_void
+ flags += 1L << 26;
+# endif
+# else
+# ifdef HAS_snprintf_void
+ flags += 1L << 26;
+# endif
+# endif
+#endif
+ return flags;
+}
+
+#ifdef DEBUG
+
+# ifndef verbose
+# define verbose 0
+# endif
+int ZLIB_INTERNAL z_verbose = verbose;
+
+void ZLIB_INTERNAL z_error (m)
+ char *m;
+{
+ fprintf(stderr, "%s\n", m);
+ exit(1);
+}
+#endif
+
+/* exported to allow conversion of error code to string for compress() and
+ * uncompress()
+ */
+const char * ZEXPORT zError(err)
+ int err;
+{
+ return ERR_MSG(err);
+}
+
+#if defined(_WIN32_WCE)
+ /* The Microsoft C Run-Time Library for Windows CE doesn't have
+ * errno. We define it as a global variable to simplify porting.
+ * Its value is always 0 and should not be used.
+ */
+ int errno = 0;
+#endif
+
+#ifndef HAVE_MEMCPY
+
+void ZLIB_INTERNAL zmemcpy(dest, source, len)
+ Bytef* dest;
+ const Bytef* source;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = *source++; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+
+int ZLIB_INTERNAL zmemcmp(s1, s2, len)
+ const Bytef* s1;
+ const Bytef* s2;
+ uInt len;
+{
+ uInt j;
+
+ for (j = 0; j < len; j++) {
+ if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; /* +1 or -1 */
+ }
+ return 0;
+}
+
+void ZLIB_INTERNAL zmemzero(dest, len)
+ Bytef* dest;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = 0; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+#endif
+
+
+#ifdef SYS16BIT
+
+#ifdef __TURBOC__
+/* Turbo C in 16-bit mode */
+
+# define MY_ZCALLOC
+
+/* Turbo C malloc() does not allow dynamic allocation of 64K bytes
+ * and farmalloc(64K) returns a pointer with an offset of 8, so we
+ * must fix the pointer. Warning: the pointer must be put back to its
+ * original form in order to free it, use zcfree().
+ */
+
+#define MAX_PTR 10
+/* 10*64K = 640K */
+
+local int next_ptr = 0;
+
+typedef struct ptr_table_s {
+ voidpf org_ptr;
+ voidpf new_ptr;
+} ptr_table;
+
+local ptr_table table[MAX_PTR];
+/* This table is used to remember the original form of pointers
+ * to large buffers (64K). Such pointers are normalized with a zero offset.
+ * Since MSDOS is not a preemptive multitasking OS, this table is not
+ * protected from concurrent access. This hack doesn't work anyway on
+ * a protected system like OS/2. Use Microsoft C instead.
+ */
+
+voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size)
+{
+ voidpf buf = opaque; /* just to make some compilers happy */
+ ulg bsize = (ulg)items*size;
+
+ /* If we allocate less than 65520 bytes, we assume that farmalloc
+ * will return a usable pointer which doesn't have to be normalized.
+ */
+ if (bsize < 65520L) {
+ buf = farmalloc(bsize);
+ if (*(ush*)&buf != 0) return buf;
+ } else {
+ buf = farmalloc(bsize + 16L);
+ }
+ if (buf == NULL || next_ptr >= MAX_PTR) return NULL;
+ table[next_ptr].org_ptr = buf;
+
+ /* Normalize the pointer to seg:0 */
+ *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4;
+ *(ush*)&buf = 0;
+ table[next_ptr++].new_ptr = buf;
+ return buf;
+}
+
+void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr)
+{
+ int n;
+ if (*(ush*)&ptr != 0) { /* object < 64K */
+ farfree(ptr);
+ return;
+ }
+ /* Find the original pointer */
+ for (n = 0; n < next_ptr; n++) {
+ if (ptr != table[n].new_ptr) continue;
+
+ farfree(table[n].org_ptr);
+ while (++n < next_ptr) {
+ table[n-1] = table[n];
+ }
+ next_ptr--;
+ return;
+ }
+ ptr = opaque; /* just to make some compilers happy */
+ Assert(0, "zcfree: ptr not found");
+}
+
+#endif /* __TURBOC__ */
+
+
+#ifdef M_I86
+/* Microsoft C in 16-bit mode */
+
+# define MY_ZCALLOC
+
+#if (!defined(_MSC_VER) || (_MSC_VER <= 600))
+# define _halloc halloc
+# define _hfree hfree
+#endif
+
+voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size)
+{
+ if (opaque) opaque = 0; /* to make compiler happy */
+ return _halloc((long)items, size);
+}
+
+void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr)
+{
+ if (opaque) opaque = 0; /* to make compiler happy */
+ _hfree(ptr);
+}
+
+#endif /* M_I86 */
+
+#endif /* SYS16BIT */
+
+
+#ifndef MY_ZCALLOC /* Any system without a special alloc function */
+
+#ifndef STDC
+extern voidp malloc OF((uInt size));
+extern voidp calloc OF((uInt items, uInt size));
+extern void free OF((voidpf ptr));
+#endif
+
+voidpf ZLIB_INTERNAL zcalloc (opaque, items, size)
+ voidpf opaque;
+ unsigned items;
+ unsigned size;
+{
+ if (opaque) items += size - size; /* make compiler happy */
+ return sizeof(uInt) > 2 ? (voidpf)malloc(items * size) :
+ (voidpf)calloc(items, size);
+}
+
+void ZLIB_INTERNAL zcfree (opaque, ptr)
+ voidpf opaque;
+ voidpf ptr;
+{
+ free(ptr);
+ if (opaque) return; /* make compiler happy */
+}
+
+#endif /* MY_ZCALLOC */
--- /dev/null
+/* zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995-2010 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef ZUTIL_H
+#define ZUTIL_H
+
+#if ((__GNUC__-0) * 10 + __GNUC_MINOR__-0 >= 33) && !defined(NO_VIZ)
+# define ZLIB_INTERNAL __attribute__((visibility ("hidden")))
+#else
+# define ZLIB_INTERNAL
+#endif
+
+#include "zlib.h"
+
+#ifdef STDC
+# if !(defined(_WIN32_WCE) && defined(_MSC_VER))
+# include <stddef.h>
+# endif
+# include <string.h>
+# include <stdlib.h>
+#endif
+
+#ifndef local
+# define local static
+#endif
+/* compile with -Dlocal if your debugger can't find static symbols */
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+extern const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
+/* (size given to avoid silly warnings with Visual C++) */
+
+#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
+
+#define ERR_RETURN(strm,err) \
+ return (strm->msg = (char*)ERR_MSG(err), (err))
+/* To be used only when the state is known to be valid */
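+
+/* For example, ERR_MSG(Z_BUF_ERROR) expands to z_errmsg[Z_NEED_DICT - (-5)],
+ * that is z_errmsg[7], which is "buffer error".
+ */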
+
+ /* common constants */
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
+
+ /* target dependencies */
+
+#if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32))
+# define OS_CODE 0x00
+# if defined(__TURBOC__) || defined(__BORLANDC__)
+# if (__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__))
+ /* Allow compilation with ANSI keywords only enabled */
+ void _Cdecl farfree( void *block );
+ void *_Cdecl farmalloc( unsigned long nbytes );
+# else
+# include <alloc.h>
+# endif
+# else /* MSC or DJGPP */
+# include <malloc.h>
+# endif
+#endif
+
+#ifdef AMIGA
+# define OS_CODE 0x01
+#endif
+
+#if defined(VAXC) || defined(VMS)
+# define OS_CODE 0x02
+# define F_OPEN(name, mode) \
+ fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
+#endif
+
+#if defined(ATARI) || defined(atarist)
+# define OS_CODE 0x05
+#endif
+
+#ifdef OS2
+# define OS_CODE 0x06
+# ifdef M_I86
+# include <malloc.h>
+# endif
+#endif
+
+#if defined(MACOS) || defined(TARGET_OS_MAC)
+# define OS_CODE 0x07
+# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os
+# include <unix.h> /* for fdopen */
+# else
+# ifndef fdopen
+# define fdopen(fd,mode) NULL /* No fdopen() */
+# endif
+# endif
+#endif
+
+#ifdef TOPS20
+# define OS_CODE 0x0a
+#endif
+
+#ifdef WIN32
+# ifndef __CYGWIN__ /* Cygwin is Unix, not Win32 */
+# define OS_CODE 0x0b
+# endif
+#endif
+
+#ifdef __50SERIES /* Prime/PRIMOS */
+# define OS_CODE 0x0f
+#endif
+
+#if defined(_BEOS_) || defined(RISCOS)
+# define fdopen(fd,mode) NULL /* No fdopen() */
+#endif
+
+#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX
+# if defined(_WIN32_WCE)
+# define fdopen(fd,mode) NULL /* No fdopen() */
+# ifndef _PTRDIFF_T_DEFINED
+ typedef int ptrdiff_t;
+# define _PTRDIFF_T_DEFINED
+# endif
+# else
+# define fdopen(fd,type) _fdopen(fd,type)
+# endif
+#endif
+
+#if defined(__BORLANDC__)
+ #pragma warn -8004
+ #pragma warn -8008
+ #pragma warn -8066
+#endif
+
+/* provide prototypes for these when building zlib without LFS */
+#if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0
+ ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t));
+ ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t));
+#endif
+
+ /* common defaults */
+
+#ifndef OS_CODE
+# define OS_CODE 0x03 /* assume Unix */
+#endif
+
+#ifndef F_OPEN
+# define F_OPEN(name, mode) fopen((name), (mode))
+#endif
+
+ /* functions */
+
+#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550)
+# ifndef HAVE_VSNPRINTF
+# define HAVE_VSNPRINTF
+# endif
+#endif
+#if defined(__CYGWIN__)
+# ifndef HAVE_VSNPRINTF
+# define HAVE_VSNPRINTF
+# endif
+#endif
+#ifndef HAVE_VSNPRINTF
+# ifdef MSDOS
+ /* vsnprintf may exist on some MS-DOS compilers (DJGPP?),
+ but for now we just assume it doesn't. */
+# define NO_vsnprintf
+# endif
+# ifdef __TURBOC__
+# define NO_vsnprintf
+# endif
+# ifdef WIN32
+ /* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */
+# if !defined(vsnprintf) && !defined(NO_vsnprintf)
+# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 )
+# define vsnprintf _vsnprintf
+# endif
+# endif
+# endif
+# ifdef __SASC
+# define NO_vsnprintf
+# endif
+#endif
+#ifdef VMS
+# define NO_vsnprintf
+#endif
+
+#if defined(pyr)
+# define NO_MEMCPY
+#endif
+#if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__)
+ /* Use our own functions for small and medium model with MSC <= 5.0.
+ * You may have to use the same strategy for Borland C (untested).
+ * The __SC__ check is for Symantec.
+ */
+# define NO_MEMCPY
+#endif
+#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
+# define HAVE_MEMCPY
+#endif
+#ifdef HAVE_MEMCPY
+# ifdef SMALL_MEDIUM /* MSDOS small or medium model */
+# define zmemcpy _fmemcpy
+# define zmemcmp _fmemcmp
+# define zmemzero(dest, len) _fmemset(dest, 0, len)
+# else
+# define zmemcpy memcpy
+# define zmemcmp memcmp
+# define zmemzero(dest, len) memset(dest, 0, len)
+# endif
+#else
+ void ZLIB_INTERNAL zmemcpy OF((Bytef* dest, const Bytef* source, uInt len));
+ int ZLIB_INTERNAL zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len));
+ void ZLIB_INTERNAL zmemzero OF((Bytef* dest, uInt len));
+#endif
+
+/* Diagnostic functions */
+#ifdef DEBUG
+# include <stdio.h>
+ extern int ZLIB_INTERNAL z_verbose;
+ extern void ZLIB_INTERNAL z_error OF((char *m));
+# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
+# define Trace(x) {if (z_verbose>=0) fprintf x ;}
+# define Tracev(x) {if (z_verbose>0) fprintf x ;}
+# define Tracevv(x) {if (z_verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (z_verbose>0 && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (z_verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
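+
+/* Note the double parentheses when calling the Trace macros, e.g.
+ *   Tracev((stderr, "inflate: stored block\n"));
+ * so that the entire argument list is forwarded to fprintf when DEBUG is
+ * defined, and the call compiles away to nothing otherwise.
+ */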
+
+
+voidpf ZLIB_INTERNAL zcalloc OF((voidpf opaque, unsigned items,
+ unsigned size));
+void ZLIB_INTERNAL zcfree OF((voidpf opaque, voidpf ptr));
+
+#define ZALLOC(strm, items, size) \
+ (*((strm)->zalloc))((strm)->opaque, (items), (size))
+#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
+#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}
+
+#endif /* ZUTIL_H */